diff --git a/go.mod b/go.mod
index 16f47a09c14..4317f8decbc 100644
--- a/go.mod
+++ b/go.mod
@@ -6,8 +6,10 @@ require (
cloud.google.com/go/monitoring v1.6.0
github.com/AlecAivazis/survey/v2 v2.3.5
github.com/Azure/azure-sdk-for-go v51.2.0+incompatible
- github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0
- github.com/Azure/go-autorest/autorest v0.11.27
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.3
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0
+ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1
+ github.com/Azure/go-autorest/autorest v0.11.28
github.com/Azure/go-autorest/autorest/azure/auth v0.5.1
github.com/Azure/go-autorest/autorest/to v0.4.0
github.com/IBM-Cloud/bluemix-go v0.0.0-20211102075456-ffc4e11dfb16
@@ -40,6 +42,7 @@ require (
github.com/gophercloud/utils v0.0.0-20220307143606-8e7800759d16
github.com/h2non/filetype v1.0.12
github.com/hashicorp/terraform-exec v0.16.1
+ github.com/jongio/azidext/go/azidext v0.4.0
github.com/kdomanski/iso9660 v0.2.1
github.com/libvirt/libvirt-go v5.10.0+incompatible
github.com/metal3-io/baremetal-operator v0.0.0-20220128094204-28771f489634
@@ -75,10 +78,10 @@ require (
github.com/ulikunitz/xz v0.5.10
github.com/vincent-petithory/dataurl v1.0.0
github.com/vmware/govmomi v0.27.4
- golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd
+ golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2
- golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab
+ golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec
google.golang.org/api v0.91.0
google.golang.org/genproto v0.0.0-20220808131553-a91ffa7f803e
google.golang.org/grpc v1.48.0
@@ -122,15 +125,15 @@ require (
require (
cloud.google.com/go/compute v1.7.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
- github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
+ github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/PaesslerAG/gval v1.0.0 // indirect
github.com/PaesslerAG/jsonpath v0.1.1 // indirect
@@ -188,6 +191,7 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kr/fs v0.1.0 // indirect
+ github.com/kylelemons/godebug v1.1.0 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
@@ -206,6 +210,7 @@ require (
github.com/openshift/custom-resource-status v1.1.2 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pierrec/lz4 v2.3.0+incompatible // indirect
+ github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/xattr v0.4.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
@@ -214,7 +219,7 @@ require (
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.21.0 // indirect
- golang.org/x/net v0.0.0-20220812174116-3211cb980234 // indirect
+ golang.org/x/net v0.0.0-20221004154528-8021a29435af // indirect
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
gopkg.in/djherbis/times.v1 v1.2.0 // indirect
gopkg.in/gcfg.v1 v1.2.3 // indirect
diff --git a/go.sum b/go.sum
index 896b744808e..dba6dc0f37e 100644
--- a/go.sum
+++ b/go.sum
@@ -74,26 +74,29 @@ github.com/AlecAivazis/survey/v2 v2.3.5 h1:A8cYupsAZkjaUmhtTYv3sSqc7LO5mp1XDfqe5
github.com/AlecAivazis/survey/v2 v2.3.5/go.mod h1:4AuI9b7RjAR+G7v9+C4YSlX/YL3K3cWNXgWXOhllqvI=
github.com/Azure/azure-sdk-for-go v51.2.0+incompatible h1:qQNk//OOHK0GZcgMMgdJ4tZuuh0zcOeUkpTxjvKFpSQ=
github.com/Azure/azure-sdk-for-go v51.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1 h1:qoVeMsc9/fh/yhxVaA0obYjVH/oI/ihrOoMwsLS9KSA=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1/go.mod h1:fBF9PQNqB8scdgpZ3ufzaLntG0AG7C1WjPMsiFOmfHM=
-github.com/Azure/azure-sdk-for-go/sdk/internal v0.8.3/go.mod h1:KLF4gFr6DcKFZwSuH8w8yEK6DpFl3LP5rhdvAb7Yz5I=
-github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2 h1:Px2KVERcYEg2Lv25AqC2hVr0xUWaq94wuEObLIkYzmA=
-github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2/go.mod h1:CdSJQNNzZhCkwDaV27XV1w48ZBPtxe7mlrZAsPNxD5g=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0 h1:Px2UA+2RvSSvv+RvJNuUB6n7rs5Wsel4dXLe90Um2n4=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ+S5sOiDlINkp7+Ef339+Nz5L5XO+cnOHo=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.3 h1:8LoU8N2lIUzkmstvwXvVfniMZlFbesfT2AmA1aqvRr8=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.3/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0/go.mod h1:bhXu1AjYL+wutSL/kpSq6s7733q2Rb0yuot9Zgfqa/0=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 h1:XUNQ4mw+zJmaA2KXzP9JlQiecy1SI+Eog7xVkPiqIbg=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
-github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
+github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM=
+github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk=
+github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.1 h1:bvUhZciHydpBxBmCheUgxxbSwJy7xcfjkUsjUcqSojc=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.1/go.mod h1:ea90/jvmnAwDrSooLH4sRIehEPtG/EPUXavDh31MnA4=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 h1:Ml+UCrnlKD+cJmSzrZ/RDcDw86NjkRUpnFh7V5JUhzU=
@@ -113,6 +116,8 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -338,9 +343,7 @@ github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TR
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/diskfs/go-diskfs v1.2.1-0.20210727185522-a769efacd235 h1:+NFKI4ptfB3AKeut6a538wanUHOKEMwZfznBZZ6a5Qc=
github.com/diskfs/go-diskfs v1.2.1-0.20210727185522-a769efacd235/go.mod h1:IoDpuEbpS+D+yCGdoOm6GNfyTeEws77ALvcMQFxmenw=
-github.com/dnaeon/go-vcr v1.1.0/go.mod h1:M7tiix8f0r6mKKJ3Yq/kqU1OYf3MnfmBWVbPx/yU9ko=
-github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
+github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/docker/cli v20.10.14+incompatible h1:dSBKJOVesDgHo7rbxlYjYsXe7gPzrTT+/cKQgpDAazg=
github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -913,6 +916,8 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/jongio/azidext/go/azidext v0.4.0 h1:TOYyVFMeWGgXNhURSgrEtUCu7JAAKgsy+5C4+AEfYlw=
+github.com/jongio/azidext/go/azidext v0.4.0/go.mod h1:VrlpGde5B+pPbTUxnThE5UIQQkcebdr3jrC2MmlMVSI=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -976,6 +981,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/krishicks/yaml-patch v0.0.10 h1:H4FcHpnNwVmw8u0MjPRjWyIXtco6zM2F78t+57oNM3E=
github.com/kubernetes-sigs/kube-storage-version-migrator v0.0.0-20191127225502-51849bc15f17/go.mod h1:enH0BVV+4+DAgWdwSlMefG8bBzTfVMTr1lApzdLZ/cc=
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/labstack/echo/v4 v4.1.17/go.mod h1:Tn2yRQL/UclUalpb5rPdXDevbkJ+lp/2svdyFBg6CHQ=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
@@ -1065,7 +1072,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
@@ -1209,6 +1215,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR
github.com/pierrec/lz4 v2.3.0+incompatible h1:CZzRn4Ut9GbUkHlQ7jqBXeZQV41ZSKWFc302ZU6lUTk=
github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -1631,7 +1639,6 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -1647,7 +1654,6 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -1662,8 +1668,8 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E=
-golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221004154528-8021a29435af h1:wv66FM3rLZGPdxpYL+ApnDe2HzHcTFta3z5nsc13wI4=
+golang.org/x/net v0.0.0-20221004154528-8021a29435af/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1797,6 +1803,7 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1823,8 +1830,9 @@ golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec h1:BkDtF2Ih9xZ7le9ndzTA7KJow28VbQW3odyk/8drmuI=
+golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
diff --git a/pkg/asset/installconfig/azure/session.go b/pkg/asset/installconfig/azure/session.go
index e9bd4856aee..0458287b4f7 100644
--- a/pkg/asset/installconfig/azure/session.go
+++ b/pkg/asset/installconfig/azure/session.go
@@ -4,12 +4,17 @@ import (
"encoding/json"
"os"
"path/filepath"
+ "strings"
"sync"
"github.com/AlecAivazis/survey/v2"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
"github.com/Azure/go-autorest/autorest"
azureenv "github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/azure/auth"
+ "github.com/jongio/azidext/go/azidext"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -63,6 +68,26 @@ func GetSessionWithCredentials(cloudName azure.CloudEnvironment, armEndpoint str
return nil, errors.Wrapf(err, "failed to get Azure environment for the %q cloud", cloudName)
}
+ var cloudConfig cloud.Configuration
+ switch cloudName {
+ case azure.StackCloud:
+ cloudConfig = cloud.Configuration{
+ ActiveDirectoryAuthorityHost: cloudEnv.ActiveDirectoryEndpoint,
+ Services: map[cloud.ServiceName]cloud.ServiceConfiguration{
+ cloud.ResourceManager: {
+ Audience: cloudEnv.TokenAudience,
+ Endpoint: cloudEnv.ResourceManagerEndpoint,
+ },
+ },
+ }
+ case azure.USGovernmentCloud:
+ cloudConfig = cloud.AzureGovernment
+ case azure.ChinaCloud:
+ cloudConfig = cloud.AzureChina
+ default:
+ cloudConfig = cloud.AzurePublic
+ }
+
if credentials == nil {
credentials, err = credentialsFromFileOrUser(&cloudEnv)
if err != nil {
@@ -70,9 +95,9 @@ func GetSessionWithCredentials(cloudName azure.CloudEnvironment, armEndpoint str
}
}
if credentials.ClientCertificatePath != "" {
- return newSessionFromCertificates(cloudEnv, credentials)
+ return newSessionFromCertificates(cloudEnv, credentials, cloudConfig)
}
- return newSessionFromCredentials(cloudEnv, credentials)
+ return newSessionFromCredentials(cloudEnv, credentials, cloudConfig)
}
// credentialsFromFileOrUser returns credentials found
@@ -226,24 +251,27 @@ func saveCredentials(credentials Credentials, filePath string) error {
return os.WriteFile(filePath, jsonCreds, 0o600)
}
-func newSessionFromCredentials(cloudEnv azureenv.Environment, credentials *Credentials) (*Session, error) {
- c := &auth.ClientCredentialsConfig{
- TenantID: credentials.TenantID,
- ClientID: credentials.ClientID,
- ClientSecret: credentials.ClientSecret,
- AADEndpoint: cloudEnv.ActiveDirectoryEndpoint,
- }
- c.Resource = cloudEnv.TokenAudience
- authorizer, err := c.Authorizer()
- if err != nil {
- return nil, errors.Wrap(err, "failed to get client credentials authorizer")
+func newSessionFromCredentials(cloudEnv azureenv.Environment, credentials *Credentials, cloudConfig cloud.Configuration) (*Session, error) {
+ options := azidentity.ClientSecretCredentialOptions{
+ ClientOptions: azcore.ClientOptions{
+ Cloud: cloudConfig,
+ },
}
- c.Resource = cloudEnv.GraphEndpoint
- graphAuthorizer, err := c.Authorizer()
+ cred, err := azidentity.NewClientSecretCredential(credentials.TenantID, credentials.ClientID, credentials.ClientSecret, &options)
if err != nil {
- return nil, errors.Wrap(err, "failed to get GraphEndpoint authorizer")
+ return nil, errors.Wrap(err, "failed to get client credentials from secret")
}
+
+ // Use an adapter so azidentity in the Azure SDK can be used as
+ // Authorizer when calling the Azure Management Packages, which we
+ // currently use. Once the Azure SDK clients (found in /sdk) move to
+ // stable, we can update our clients and they will be able to use the
+ // creds directly without the authorizer. The schedule is here:
+ // https://azure.github.io/azure-sdk/releases/latest/index.html#go
+ authorizer := azidext.NewTokenCredentialAdapter(cred, []string{endpointToScope(cloudEnv.TokenAudience)})
+ graphAuthorizer := azidext.NewTokenCredentialAdapter(cred, []string{endpointToScope(cloudEnv.GraphEndpoint)})
+
return &Session{
GraphAuthorizer: graphAuthorizer,
Authorizer: authorizer,
@@ -252,25 +280,42 @@ func newSessionFromCredentials(cloudEnv azureenv.Environment, credentials *Crede
}, nil
}
-func newSessionFromCertificates(cloudEnv azureenv.Environment, credentials *Credentials) (*Session, error) {
- c := &auth.ClientCertificateConfig{
- TenantID: credentials.TenantID,
- ClientID: credentials.ClientID,
- CertificatePath: credentials.ClientCertificatePath,
- CertificatePassword: credentials.ClientCertificatePassword,
- AADEndpoint: cloudEnv.ActiveDirectoryEndpoint,
+func newSessionFromCertificates(cloudEnv azureenv.Environment, credentials *Credentials, cloudConfig cloud.Configuration) (*Session, error) {
+ options := azidentity.ClientCertificateCredentialOptions{
+ ClientOptions: azcore.ClientOptions{
+ Cloud: cloudConfig,
+ },
+ }
+
+ data, err := os.ReadFile(credentials.ClientCertificatePath)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to read client certificate file")
}
- c.Resource = cloudEnv.TokenAudience
- authorizer, err := c.Authorizer()
+
+ // NewClientCertificateCredential requires at least one *x509.Certificate,
+ // and a crypto.PrivateKey. ParseCertificates returns these given
+ // certificate data in PEM or PKCS12 format. It handles common scenarios
+ // but has limitations, for example it doesn't load PEM encrypted private
+ // keys.
+ certs, key, err := azidentity.ParseCertificates(data, nil)
if err != nil {
- return nil, errors.Wrap(err, "failed to get client credentials authorizer")
+ return nil, errors.Wrap(err, "failed to parse client certificate")
}
- c.Resource = cloudEnv.GraphEndpoint
- graphAuthorizer, err := c.Authorizer()
+ cred, err := azidentity.NewClientCertificateCredential(credentials.TenantID, credentials.ClientID, certs, key, &options)
if err != nil {
- return nil, errors.Wrap(err, "failed to get GraphEndpoint authorizer")
+ return nil, errors.Wrap(err, "failed to get client credentials from certificate")
}
+
+ // Use an adapter so azidentity in the Azure SDK can be used as
+ // Authorizer when calling the Azure Management Packages, which we
+ // currently use. Once the Azure SDK clients (found in /sdk) move to
+ // stable, we can update our clients and they will be able to use the
+ // creds directly without the authorizer. The schedule is here:
+ // https://azure.github.io/azure-sdk/releases/latest/index.html#go
+ authorizer := azidext.NewTokenCredentialAdapter(cred, []string{endpointToScope(cloudEnv.TokenAudience)})
+ graphAuthorizer := azidext.NewTokenCredentialAdapter(cred, []string{endpointToScope(cloudEnv.GraphEndpoint)})
+
return &Session{
GraphAuthorizer: graphAuthorizer,
Authorizer: authorizer,
@@ -278,3 +323,10 @@ func newSessionFromCertificates(cloudEnv azureenv.Environment, credentials *Cred
Environment: cloudEnv,
}, nil
}
+
+func endpointToScope(endpoint string) string {
+ if !strings.HasSuffix(endpoint, "/.default") {
+ endpoint += "/.default"
+ }
+ return endpoint
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
index d964e4494e1..1708e3b929f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
@@ -1,5 +1,100 @@
# Release History
+## 1.1.3 (2022-09-01)
+
+### Bugs Fixed
+* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines.
+
+## 1.1.2 (2022-08-09)
+
+### Other Changes
+* Fixed various doc bugs.
+
+## 1.1.1 (2022-06-30)
+
+### Bugs Fixed
+* Avoid polling when a RELO LRO synchronously terminates.
+
+## 1.1.0 (2022-06-03)
+
+### Other Changes
+* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests.
+
+## 1.0.0 (2022-05-12)
+
+### Features Added
+* Added interface `runtime.PollingHandler` to support custom poller implementations.
+ * Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`.
+
+### Breaking Changes
+* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost`
+* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic`
+* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions`
+* Removed `TokenRequestOptions.TenantID`
+* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration`
+* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()`
+* Removed `arm/runtime.FinalStateVia` and related `const` values
+* Renamed `runtime.PageProcessor` to `runtime.PagingHandler`
+* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported.
+* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()`
+* `TokenCredential.GetToken` now returns `AccessToken` by value.
+
+### Bugs Fixed
+* When per-try timeouts are enabled, only cancel the context after the body has been read and closed.
+* The `Operation-Location` poller now properly handles `final-state-via` values.
+* Improvements in `runtime.Poller[T]`
+ * `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state.
+ * `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries.
+
+### Other Changes
+* Updated to latest `internal` module and absorbed breaking changes.
+ * Use `temporal.Resource` and deleted copy.
+* The internal poller implementation has been refactored.
+ * The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification.
+ * The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface.
+ * The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it.
+* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions`
+* Default User-Agent headers no longer include `azcore` version information
+
+## 0.23.1 (2022-04-14)
+
+### Bugs Fixed
+* Include XML header when marshalling XML content.
+* Handle XML namespaces when searching for error code.
+* Handle `odata.error` when searching for error code.
+
+## 0.23.0 (2022-04-04)
+
+### Features Added
+* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations.
+* Added `cloud` package with a new API for cloud configuration
+* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type.
+
+### Breaking Changes
+* Removed the `Poller` type-alias to the internal poller implementation.
+* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations.
+* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter.
+* Replaced `arm.Endpoint` with `cloud` API
+ * Removed the `endpoint` parameter from `NewRPRegistrationPolicy()`
+ * `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error`
+* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages.
+ * Removed the `pollerID` parameter as it's no longer required.
+ * Created optional parameter structs and moved optional parameters into them.
+* Changed `FinalStateVia` field to a `const` type.
+
+### Other Changes
+* Converted expiring resource and dependent types to use generics.
+
+## 0.22.0 (2022-03-03)
+
+### Features Added
+* Added header `WWW-Authenticate` to the default allow-list of headers for logging.
+* Added a pipeline policy that enables the retrieval of HTTP responses from API calls.
+ * Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default).
+
+### Breaking Changes
+* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package.
+
## 0.21.1 (2022-02-04)
### Bugs Fixed
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go
new file mode 100644
index 00000000000..9d077a3e126
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go
@@ -0,0 +1,44 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package cloud
+
+var (
+ // AzureChina contains configuration for Azure China.
+ AzureChina = Configuration{
+ ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{},
+ }
+ // AzureGovernment contains configuration for Azure Government.
+ AzureGovernment = Configuration{
+ ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{},
+ }
+ // AzurePublic contains configuration for Azure Public Cloud.
+ AzurePublic = Configuration{
+ ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{},
+ }
+)
+
+// ServiceName identifies a cloud service.
+type ServiceName string
+
+// ResourceManager is a global constant identifying Azure Resource Manager.
+const ResourceManager ServiceName = "resourceManager"
+
+// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager.
+type ServiceConfiguration struct {
+ // Audience is the audience the client will request for its access tokens.
+ Audience string
+ // Endpoint is the service's base URL.
+ Endpoint string
+}
+
+// Configuration configures a cloud.
+type Configuration struct {
+ // ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory.
+ ActiveDirectoryAuthorityHost string
+ // Services contains configuration for the cloud's services.
+ Services map[ServiceName]ServiceConfiguration
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go
new file mode 100644
index 00000000000..985b1bde2f2
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go
@@ -0,0 +1,53 @@
+//go:build go1.16
+// +build go1.16
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+/*
+Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds.
+
+Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as
+"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other
+Azure Clouds to configure clients appropriately.
+
+This package contains predefined configuration for well-known sovereign clouds such as Azure Government and
+Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For
+example, configuring a credential and ARM client for Azure Government:
+
+ opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment}
+ cred, err := azidentity.NewDefaultAzureCredential(
+ &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts},
+ )
+ handle(err)
+
+ client, err := armsubscription.NewClient(
+ cred, &arm.ClientOptions{ClientOptions: opts},
+ )
+ handle(err)
+
+Applications deployed to a private cloud such as Azure Stack create a Configuration object with
+appropriate values:
+
+ c := cloud.Configuration{
+ ActiveDirectoryAuthorityHost: "https://...",
+ Services: map[cloud.ServiceName]cloud.ServiceConfiguration{
+ cloud.ResourceManager: {
+ Audience: "...",
+ Endpoint: "https://...",
+ },
+ },
+ }
+ opts := azcore.ClientOptions{Cloud: c}
+
+ cred, err := azidentity.NewDefaultAzureCredential(
+ &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts},
+ )
+ handle(err)
+
+ client, err := armsubscription.NewClient(
+ cred, &arm.ClientOptions{ClientOptions: opts},
+ )
+ handle(err)
+*/
+package cloud
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go
index 68253efccc5..f9fb23422df 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -7,30 +7,33 @@
package azcore
import (
+ "context"
"reflect"
+ "time"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
// AccessToken represents an Azure service bearer access token with expiry information.
-type AccessToken = shared.AccessToken
+type AccessToken struct {
+ Token string
+ ExpiresOn time.Time
+}
// TokenCredential represents a credential capable of providing an OAuth token.
-type TokenCredential = shared.TokenCredential
+type TokenCredential interface {
+ // GetToken requests an access token for the specified set of scopes.
+ GetToken(ctx context.Context, options policy.TokenRequestOptions) (AccessToken, error)
+}
// holds sentinel values used to send nulls
var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{}
// NullValue is used to send an explicit 'null' within a request.
// This is typically used in JSON-MERGE-PATCH operations to delete a value.
-func NullValue(v interface{}) interface{} {
- t := reflect.TypeOf(v)
- if k := t.Kind(); k != reflect.Ptr && k != reflect.Slice && k != reflect.Map {
- // t is not of pointer type, make it be of pointer type
- t = reflect.PtrTo(t)
- }
+func NullValue[T any]() T {
+ t := shared.TypeOfT[T]()
v, found := nullables[t]
if !found {
var o reflect.Value
@@ -48,18 +51,14 @@ func NullValue(v interface{}) interface{} {
nullables[t] = v
}
// return the sentinel object
- return v
+ return v.(T)
}
// IsNullValue returns true if the field contains a null sentinel value.
// This is used by custom marshallers to properly encode a null value.
-func IsNullValue(v interface{}) bool {
+func IsNullValue[T any](v T) bool {
// see if our map has a sentinel object for this *T
t := reflect.TypeOf(v)
- if k := t.Kind(); k != reflect.Ptr && k != reflect.Slice && k != reflect.Map {
- // v isn't a pointer type so it can never be a null
- return false
- }
if o, found := nullables[t]; found {
o1 := reflect.ValueOf(o)
v1 := reflect.ValueOf(v)
@@ -74,6 +73,3 @@ func IsNullValue(v interface{}) bool {
// ClientOptions contains configuration settings for a client's pipeline.
type ClientOptions = policy.ClientOptions
-
-// Poller encapsulates state and logic for polling on long-running operations.
-type Poller = pollers.Poller
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go
index 69211850e42..28c64678c76 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go
@@ -1,20 +1,20 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
// license that can be found in the LICENSE file.
/*
-Package azcore implements an HTTP request/response middleware pipeline.
+Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients.
The middleware consists of three components.
- - One or more Policy instances.
- - A Transporter instance.
- - A Pipeline instance that combines the Policy and Transporter instances.
+ - One or more Policy instances.
+ - A Transporter instance.
+ - A Pipeline instance that combines the Policy and Transporter instances.
-Implementing the Policy Interface
+# Implementing the Policy Interface
A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as
a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share
@@ -34,53 +34,53 @@ and error instances to its caller.
Template for implementing a stateless Policy:
- type policyFunc func(*policy.Request) (*http.Response, error)
- // Do implements the Policy interface on policyFunc.
+ type policyFunc func(*policy.Request) (*http.Response, error)
- func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
- return pf(req)
- }
+ // Do implements the Policy interface on policyFunc.
+ func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
+ return pf(req)
+ }
- func NewMyStatelessPolicy() policy.Policy {
- return policyFunc(func(req *policy.Request) (*http.Response, error) {
- // TODO: mutate/process Request here
+ func NewMyStatelessPolicy() policy.Policy {
+ return policyFunc(func(req *policy.Request) (*http.Response, error) {
+ // TODO: mutate/process Request here
- // forward Request to next Policy & get Response/error
- resp, err := req.Next()
+ // forward Request to next Policy & get Response/error
+ resp, err := req.Next()
- // TODO: mutate/process Response/error here
+ // TODO: mutate/process Response/error here
- // return Response/error to previous Policy
- return resp, err
- })
- }
+ // return Response/error to previous Policy
+ return resp, err
+ })
+ }
Template for implementing a stateful Policy:
- type MyStatefulPolicy struct {
- // TODO: add configuration/setting fields here
- }
+ type MyStatefulPolicy struct {
+ // TODO: add configuration/setting fields here
+ }
- // TODO: add initialization args to NewMyStatefulPolicy()
- func NewMyStatefulPolicy() policy.Policy {
- return &MyStatefulPolicy{
- // TODO: initialize configuration/setting fields here
- }
- }
+ // TODO: add initialization args to NewMyStatefulPolicy()
+ func NewMyStatefulPolicy() policy.Policy {
+ return &MyStatefulPolicy{
+ // TODO: initialize configuration/setting fields here
+ }
+ }
- func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
- // TODO: mutate/process Request here
+ func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
+ // TODO: mutate/process Request here
- // forward Request to next Policy & get Response/error
- resp, err := req.Next()
+ // forward Request to next Policy & get Response/error
+ resp, err := req.Next()
- // TODO: mutate/process Response/error here
+ // TODO: mutate/process Response/error here
- // return Response/error to previous Policy
- return resp, err
- }
+ // return Response/error to previous Policy
+ return resp, err
+ }
-Implementing the Transporter Interface
+# Implementing the Transporter Interface
The Transporter interface is responsible for sending the HTTP request and returning the corresponding
HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter
@@ -88,87 +88,87 @@ implementation uses a shared http.Client from the standard library.
The same stateful/stateless rules for Policy implementations apply to Transporter implementations.
-Using Policy and Transporter Instances Via a Pipeline
+# Using Policy and Transporter Instances Via a Pipeline
To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function.
- func NewPipeline(transport Transporter, policies ...Policy) Pipeline
+ func NewPipeline(transport Transporter, policies ...Policy) Pipeline
The specified Policy instances form a chain and are invoked in the order provided to NewPipeline
followed by the Transporter.
Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method.
- func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
+ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
- func (p Pipeline) Do(req *Request) (*http.Request, error)
+ func (p Pipeline) Do(req *Request) (*http.Request, error)
The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter
instances. The response/error is then sent through the same chain of Policy instances in reverse
order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with
TransportA.
- pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC)
+ pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC)
The flow of Request and Response looks like the following:
- policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+
- |
- HTTP(s) endpoint
- |
- caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+
+ policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+
+ |
+ HTTP(S) endpoint
+ |
+ caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+
-Creating a Request Instance
+# Creating a Request Instance
The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also
contains some internal state and provides various convenience methods. You create a Request instance
by calling the runtime.NewRequest function:
- func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
+ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
If the Request should contain a body, call the SetBody method.
- func (req *Request) SetBody(body ReadSeekCloser, contentType string) error
+ func (req *Request) SetBody(body ReadSeekCloser, contentType string) error
A seekable stream is required so that upon retry, the retry Policy instance can seek the stream
back to the beginning before retrying the network request and re-uploading the body.
-Sending an Explicit Null
+# Sending an Explicit Null
Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted.
- {
- "delete-me": null
- }
+ {
+ "delete-me": null
+ }
This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as
a means to resolve the ambiguity between a field to be excluded and its zero-value.
- type Widget struct {
- Name *string `json:",omitempty"`
- Count *int `json:",omitempty"`
- }
+ type Widget struct {
+ Name *string `json:",omitempty"`
+ Count *int `json:",omitempty"`
+ }
In the above example, Name and Count are defined as pointer-to-type to disambiguate between
a missing value (nil) and a zero-value (0) which might have semantic differences.
-In a PATCH operation, any fields left as `nil` are to have their values preserved. When updating
+In a PATCH operation, any fields left as nil are to have their values preserved. When updating
a Widget's count, one simply specifies the new value for Count, leaving Name nil.
To fulfill the requirement for sending a JSON null, the NullValue() function can be used.
- w := Widget{
- Count: azcore.NullValue(0).(*int),
- }
+ w := Widget{
+ Count: azcore.NullValue[*int](),
+ }
This sends an explict "null" for Count, indicating that any current value for Count should be deleted.
-Processing the Response
+# Processing the Response
When the HTTP response is received, the *http.Response is returned directly. Each Policy instance
can inspect/mutate the *http.Response.
-Built-in Logging
+# Built-in Logging
To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program.
@@ -177,5 +177,81 @@ a callback that writes to the desired location. Any custom logging implementati
own synchronization to handle concurrent invocations.
See the docs for the log package for further details.
+
+# Pageable Operations
+
+Pageable operations return potentially large data sets spread over multiple GET requests. The result of
+each GET is a "page" of data consisting of a slice of items.
+
+Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T].
+
+ func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse]
+
+The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages
+and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked.
+
+ pager := widgetClient.NewListWidgetsPager(nil)
+ for pager.More() {
+ page, err := pager.NextPage(context.TODO())
+ // handle err
+ for _, widget := range page.Values {
+ // process widget
+ }
+ }
+
+# Long-Running Operations
+
+Long-running operations (LROs) are operations consisting of an initial request to start the operation followed
+by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one
+of the following values.
+
+ - Succeeded - the LRO completed successfully
+ - Failed - the LRO failed to complete
+ - Canceled - the LRO was canceled
+
+LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T].
+
+ func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error)
+
+When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started.
+It does _not_ mean that the widget has been created or updated (or failed to be created/updated).
+
+The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete,
+call the PollUntilDone() method.
+
+ poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil)
+ // handle err
+ result, err := poller.PollUntilDone(context.TODO(), nil)
+ // handle err
+ // use result
+
+The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the
+context is canceled/timed out.
+
+Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to
+this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation
+mechanism as required.
+
+# Resume Tokens
+
+Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to
+recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method.
+
+ token, err := poller.ResumeToken()
+ // handle error
+
+Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls
+to poller.Poll() might change the poller's state. In this case, a new token should be created.
+
+After the token has been obtained, it can be used to recreate an instance of the originating poller.
+
+ poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{
+ ResumeToken: token,
+ })
+
+When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken.
+
+Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO
+BeginA() will result in an error.
*/
package azcore
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
index daf415f1bf0..17bd50c6732 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
@@ -1,16 +1,14 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azcore
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
-)
+import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
// ResponseError is returned when a request is made to a service and
// the service returns a non-success HTTP status code.
// Use errors.As() to access this type in the error chain.
-type ResponseError = shared.ResponseError
+type ResponseError = exported.ResponseError
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go
index d862501cea9..23ea7e7c8ea 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
new file mode 100644
index 00000000000..6e029d493ce
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
@@ -0,0 +1,60 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package exported
+
+import (
+ "io"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+)
+
+type nopCloser struct {
+ io.ReadSeeker
+}
+
+func (n nopCloser) Close() error {
+ return nil
+}
+
+// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
+// Exported as streaming.NopCloser().
+func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
+ return nopCloser{rs}
+}
+
+// HasStatusCode returns true if the Response's status code is one of the specified values.
+// Exported as runtime.HasStatusCode().
+func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
+ if resp == nil {
+ return false
+ }
+ for _, sc := range statusCodes {
+ if resp.StatusCode == sc {
+ return true
+ }
+ }
+ return false
+}
+
+// Payload reads and returns the response body or an error.
+// On a successful read, the response body is cached.
+// Subsequent reads will access the cached value.
+// Exported as runtime.Payload().
+func Payload(resp *http.Response) ([]byte, error) {
+ // r.Body won't be a nopClosingBytesReader if downloading was skipped
+ if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
+ return buf.Bytes(), nil
+ }
+ bytesBody, err := io.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ return nil, err
+ }
+ resp.Body = shared.NewNopClosingBytesReader(bytesBody)
+ return bytesBody, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pipeline/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go
similarity index 92%
rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pipeline/pipeline.go
rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go
index e2c9f115a1d..c44efd6eff5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pipeline/pipeline.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go
@@ -1,10 +1,10 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
-package pipeline
+package exported
import (
"errors"
@@ -16,6 +16,7 @@ import (
// Policy represents an extensibility point for the Pipeline that can mutate the specified
// Request and react to the received Response.
+// Exported as policy.Policy.
type Policy interface {
// Do applies the policy to the specified Request. When implementing a Policy, mutate the
// request before calling req.Next() to move on to the next policy, and respond to the result
@@ -25,11 +26,13 @@ type Policy interface {
// Pipeline represents a primitive for sending HTTP requests and receiving responses.
// Its behavior can be extended by specifying policies during construction.
+// Exported as runtime.Pipeline.
type Pipeline struct {
policies []Policy
}
// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses.
+// Exported as policy.Transporter.
type Transporter interface {
// Do sends the HTTP request and returns the HTTP response or error.
Do(req *http.Request) (*http.Response, error)
@@ -56,6 +59,7 @@ func (tp transportPolicy) Do(req *Request) (*http.Response, error) {
}
// NewPipeline creates a new Pipeline object from the specified Policies.
+// Not directly exported, but used as part of runtime.NewPipeline().
func NewPipeline(transport Transporter, policies ...Policy) Pipeline {
// transport policy must always be the last in the slice
policies = append(policies, transportPolicy{trans: transport})
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pipeline/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
similarity index 90%
rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pipeline/request.go
rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
index e88768bb244..4aeec158937 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pipeline/request.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
@@ -1,10 +1,10 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
-package pipeline
+package exported
import (
"context"
@@ -18,17 +18,9 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
-// PolicyFunc is a type that implements the Policy interface.
-// Use this type when implementing a stateless policy as a first-class function.
-type PolicyFunc func(*Request) (*http.Response, error)
-
-// Do implements the Policy interface on PolicyFunc.
-func (pf PolicyFunc) Do(req *Request) (*http.Response, error) {
- return pf(req)
-}
-
// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline.
// Don't use this type directly, use NewRequest() instead.
+// Exported as policy.Request.
type Request struct {
req *http.Request
body io.ReadSeekCloser
@@ -53,6 +45,7 @@ func (ov opValues) get(value interface{}) bool {
}
// NewRequest creates a new Request with the specified input.
+// Exported as runtime.NewRequest().
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) {
req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil)
if err != nil {
@@ -157,8 +150,7 @@ func (req *Request) Close() error {
// Clone returns a deep copy of the request with its context changed to ctx.
func (req *Request) Clone(ctx context.Context) *Request {
- r2 := Request{}
- r2 = *req
+ r2 := *req
r2.req = req.req.Clone(ctx)
return &r2
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
similarity index 89%
rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/response_error.go
rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
index e2297706d71..3db6acc8325 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/response_error.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
@@ -1,10 +1,10 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
-package shared
+package exported
import (
"bytes"
@@ -15,6 +15,7 @@ import (
)
// NewResponseError creates a new *ResponseError from the provided HTTP response.
+// Exported as runtime.NewResponseError().
func NewResponseError(resp *http.Response) error {
respErr := &ResponseError{
StatusCode: resp.StatusCode,
@@ -59,6 +60,13 @@ func extractErrorCodeJSON(body []byte) string {
return ""
}
rawObj = unwrapped
+ } else if wrapped, ok := rawObj["odata.error"]; ok {
+ // check if this a wrapped odata error, i.e. { "odata.error": { ... } }
+ unwrapped, ok := wrapped.(map[string]any)
+ if !ok {
+ return ""
+ }
+ rawObj = unwrapped
}
// now check for the error code
@@ -75,7 +83,7 @@ func extractErrorCodeJSON(body []byte) string {
func extractErrorCodeXML(body []byte) string {
// regular expression is much easier than dealing with the XML parser
- rx := regexp.MustCompile(`<[c|C]ode>\s*(\w+)\s*<\/[c|C]ode>`)
+ rx := regexp.MustCompile(`<(?:\w+:)?[c|C]ode>\s*(\w+)\s*<\/(?:\w+:)?[c|C]ode>`)
res := rx.FindStringSubmatch(string(body))
if len(res) != 2 {
return ""
@@ -87,6 +95,7 @@ func extractErrorCodeXML(body []byte) string {
// ResponseError is returned when a request is made to a service and
// the service returns a non-success HTTP status code.
// Use errors.As() to access this type in the error chain.
+// Exported as azcore.ResponseError.
type ResponseError struct {
// ErrorCode is the error code returned by the resource provider if available.
ErrorCode string
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go
new file mode 100644
index 00000000000..0684cb31739
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go
@@ -0,0 +1,38 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// This is an internal helper package to combine the complete logging APIs.
+package log
+
+import (
+ azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+type Event = log.Event
+
+const (
+ EventRequest = azlog.EventRequest
+ EventResponse = azlog.EventResponse
+ EventRetryPolicy = azlog.EventRetryPolicy
+ EventLRO = azlog.EventLRO
+)
+
+func Write(cls log.Event, msg string) {
+ log.Write(cls, msg)
+}
+
+func Writef(cls log.Event, format string, a ...interface{}) {
+ log.Writef(cls, format, a...)
+}
+
+func SetListener(lst func(Event, string)) {
+ log.SetListener(lst)
+}
+
+func Should(cls log.Event) bool {
+ return log.Should(cls)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
new file mode 100644
index 00000000000..4f0441dc166
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
@@ -0,0 +1,154 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package async
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+)
+
+// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md
+
+// Applicable returns true if the LRO is using Azure-AsyncOperation.
+func Applicable(resp *http.Response) bool {
+ return resp.Header.Get(shared.HeaderAzureAsync) != ""
+}
+
+// CanResume returns true if the token can rehydrate this poller type.
+func CanResume(token map[string]interface{}) bool {
+ _, ok := token["asyncURL"]
+ return ok
+}
+
+// Poller is an LRO poller that uses the Azure-AsyncOperation pattern.
+type Poller[T any] struct {
+ pl exported.Pipeline
+
+ resp *http.Response
+
+ // The URL from Azure-AsyncOperation header.
+ AsyncURL string `json:"asyncURL"`
+
+ // The URL from Location header.
+ LocURL string `json:"locURL"`
+
+ // The URL from the initial LRO request.
+ OrigURL string `json:"origURL"`
+
+ // The HTTP method from the initial LRO request.
+ Method string `json:"method"`
+
+ // The value of final-state-via from swagger, can be the empty string.
+ FinalState pollers.FinalStateVia `json:"finalState"`
+
+ // The LRO's current state.
+ CurState string `json:"state"`
+}
+
+// New creates a new Poller from the provided initial response and final-state type.
+// Pass nil for response to create an empty Poller for rehydration.
+func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) {
+ if resp == nil {
+ log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.")
+ return &Poller[T]{pl: pl}, nil
+ }
+ log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.")
+ asyncURL := resp.Header.Get(shared.HeaderAzureAsync)
+ if asyncURL == "" {
+ return nil, errors.New("response is missing Azure-AsyncOperation header")
+ }
+ if !pollers.IsValidURL(asyncURL) {
+ return nil, fmt.Errorf("invalid polling URL %s", asyncURL)
+ }
+ // check for provisioning state. if the operation is a RELO
+ // and terminates synchronously this will prevent extra polling.
+ // it's ok if there's no provisioning state.
+ state, _ := pollers.GetProvisioningState(resp)
+ if state == "" {
+ state = pollers.StatusInProgress
+ }
+ p := &Poller[T]{
+ pl: pl,
+ resp: resp,
+ AsyncURL: asyncURL,
+ LocURL: resp.Header.Get(shared.HeaderLocation),
+ OrigURL: resp.Request.URL.String(),
+ Method: resp.Request.Method,
+ FinalState: finalState,
+ CurState: state,
+ }
+ return p, nil
+}
+
+// Done returns true if the LRO is in a terminal state.
+func (p *Poller[T]) Done() bool {
+ return pollers.IsTerminalState(p.CurState)
+}
+
+// Poll retrieves the current state of the LRO.
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
+ err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) {
+ state, err := pollers.GetStatus(resp)
+ if err != nil {
+ return "", err
+ } else if state == "" {
+ return "", errors.New("the response did not contain a status")
+ }
+ p.resp = resp
+ p.CurState = state
+ return p.CurState, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return p.resp, nil
+}
+
+func (p *Poller[T]) Result(ctx context.Context, out *T) error {
+ if p.resp.StatusCode == http.StatusNoContent {
+ return nil
+ } else if pollers.Failed(p.CurState) {
+ return exported.NewResponseError(p.resp)
+ }
+ var req *exported.Request
+ var err error
+ if p.Method == http.MethodPatch || p.Method == http.MethodPut {
+ // for PATCH and PUT, the final GET is on the original resource URL
+ req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
+ } else if p.Method == http.MethodPost {
+ if p.FinalState == pollers.FinalStateViaAzureAsyncOp {
+ // no final GET required
+ } else if p.FinalState == pollers.FinalStateViaOriginalURI {
+ req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
+ } else if p.LocURL != "" {
+ // ideally FinalState would be set to "location" but it isn't always.
+ // must check last due to more permissive condition.
+ req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
+ }
+ }
+ if err != nil {
+ return err
+ }
+
+ // if a final GET request has been created, execute it
+ if req != nil {
+ resp, err := p.pl.Do(req)
+ if err != nil {
+ return err
+ }
+ p.resp = resp
+ }
+
+ return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
new file mode 100644
index 00000000000..99e9f2f8d0a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
@@ -0,0 +1,130 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package body
+
+import (
+ "context"
+ "errors"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
+)
+
+// Kind is the identifier of this type in a resume token.
+const kind = "body"
+
+// Applicable returns true if the LRO is using no headers, just provisioning state.
+// This is only applicable to PATCH and PUT methods and assumes no polling headers.
+func Applicable(resp *http.Response) bool {
+ // we can't check for absense of headers due to some misbehaving services
+ // like redis that return a Location header but don't actually use that protocol
+ return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut
+}
+
+// CanResume returns true if the token can rehydrate this poller type.
+func CanResume(token map[string]interface{}) bool {
+ t, ok := token["type"]
+ if !ok {
+ return false
+ }
+ tt, ok := t.(string)
+ if !ok {
+ return false
+ }
+ return tt == kind
+}
+
+// Poller is an LRO poller that uses the Body pattern.
+type Poller[T any] struct {
+ pl exported.Pipeline
+
+ resp *http.Response
+
+ // The poller's type, used for resume token processing.
+ Type string `json:"type"`
+
+ // The URL for polling.
+ PollURL string `json:"pollURL"`
+
+ // The LRO's current state.
+ CurState string `json:"state"`
+}
+
+// New creates a new Poller from the provided initial response.
+// Pass nil for response to create an empty Poller for rehydration.
+func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
+ if resp == nil {
+ log.Write(log.EventLRO, "Resuming Body poller.")
+ return &Poller[T]{pl: pl}, nil
+ }
+ log.Write(log.EventLRO, "Using Body poller.")
+ p := &Poller[T]{
+ pl: pl,
+ resp: resp,
+ Type: kind,
+ PollURL: resp.Request.URL.String(),
+ }
+ // default initial state to InProgress. depending on the HTTP
+ // status code and provisioning state, we might change the value.
+ curState := pollers.StatusInProgress
+ provState, err := pollers.GetProvisioningState(resp)
+ if err != nil && !errors.Is(err, pollers.ErrNoBody) {
+ return nil, err
+ }
+ if resp.StatusCode == http.StatusCreated && provState != "" {
+ // absense of provisioning state is ok for a 201, means the operation is in progress
+ curState = provState
+ } else if resp.StatusCode == http.StatusOK {
+ if provState != "" {
+ curState = provState
+ } else if provState == "" {
+ // for a 200, absense of provisioning state indicates success
+ curState = pollers.StatusSucceeded
+ }
+ } else if resp.StatusCode == http.StatusNoContent {
+ curState = pollers.StatusSucceeded
+ }
+ p.CurState = curState
+ return p, nil
+}
+
+func (p *Poller[T]) Done() bool {
+ return pollers.IsTerminalState(p.CurState)
+}
+
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
+ err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
+ if resp.StatusCode == http.StatusNoContent {
+ p.resp = resp
+ p.CurState = pollers.StatusSucceeded
+ return p.CurState, nil
+ }
+ state, err := pollers.GetProvisioningState(resp)
+ if errors.Is(err, pollers.ErrNoBody) {
+ // a missing response body in non-204 case is an error
+ return "", err
+ } else if state == "" {
+ // a response body without provisioning state is considered terminal success
+ state = pollers.StatusSucceeded
+ } else if err != nil {
+ return "", err
+ }
+ p.resp = resp
+ p.CurState = state
+ return p.CurState, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return p.resp, nil
+}
+
+func (p *Poller[T]) Result(ctx context.Context, out *T) error {
+ return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
index 35f318a1c71..276685da443 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -7,32 +7,55 @@
package loc
import (
+ "context"
"errors"
"fmt"
"net/http"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
// Kind is the identifier of this type in a resume token.
-const Kind = "Location"
+const kind = "loc"
// Applicable returns true if the LRO is using Location.
func Applicable(resp *http.Response) bool {
return resp.Header.Get(shared.HeaderLocation) != ""
}
+// CanResume returns true if the token can rehydrate this poller type.
+func CanResume(token map[string]interface{}) bool {
+ t, ok := token["type"]
+ if !ok {
+ return false
+ }
+ tt, ok := t.(string)
+ if !ok {
+ return false
+ }
+ return tt == kind
+}
+
// Poller is an LRO poller that uses the Location pattern.
-type Poller struct {
+type Poller[T any] struct {
+ pl exported.Pipeline
+ resp *http.Response
+
Type string `json:"type"`
PollURL string `json:"pollURL"`
- CurState int `json:"state"`
+ CurState string `json:"state"`
}
// New creates a new Poller from the provided initial response.
-func New(resp *http.Response, pollerID string) (*Poller, error) {
+// Pass nil for response to create an empty Poller for rehydration.
+func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
+ if resp == nil {
+ log.Write(log.EventLRO, "Resuming Location poller.")
+ return &Poller[T]{pl: pl}, nil
+ }
log.Write(log.EventLRO, "Using Location poller.")
locURL := resp.Header.Get(shared.HeaderLocation)
if locURL == "" {
@@ -41,40 +64,55 @@ func New(resp *http.Response, pollerID string) (*Poller, error) {
if !pollers.IsValidURL(locURL) {
return nil, fmt.Errorf("invalid polling URL %s", locURL)
}
- return &Poller{
- Type: pollers.MakeID(pollerID, Kind),
+ // check for provisioning state. if the operation is a RELO
+ // and terminates synchronously this will prevent extra polling.
+ // it's ok if there's no provisioning state.
+ state, _ := pollers.GetProvisioningState(resp)
+ if state == "" {
+ state = pollers.StatusInProgress
+ }
+ return &Poller[T]{
+ pl: pl,
+ resp: resp,
+ Type: kind,
PollURL: locURL,
- CurState: resp.StatusCode,
+ CurState: state,
}, nil
}
-func (p *Poller) URL() string {
- return p.PollURL
-}
-
-func (p *Poller) Done() bool {
- return pollers.IsTerminalState(p.Status())
+func (p *Poller[T]) Done() bool {
+ return pollers.IsTerminalState(p.CurState)
}
-func (p *Poller) Update(resp *http.Response) error {
- // if the endpoint returned a location header, update cached value
- if loc := resp.Header.Get(shared.HeaderLocation); loc != "" {
- p.PollURL = loc
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
+ err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
+ // location polling can return an updated polling URL
+ if h := resp.Header.Get(shared.HeaderLocation); h != "" {
+ p.PollURL = h
+ }
+ // if provisioning state is available, use that. this is only
+ // for some ARM LRO scenarios (e.g. DELETE with a Location header)
+ // so if it's missing then use HTTP status code.
+ provState, _ := pollers.GetProvisioningState(resp)
+ p.resp = resp
+ if provState != "" {
+ p.CurState = provState
+ } else if resp.StatusCode == http.StatusAccepted {
+ p.CurState = pollers.StatusInProgress
+ } else if resp.StatusCode > 199 && resp.StatusCode < 300 {
+ // any 2xx other than a 202 indicates success
+ p.CurState = pollers.StatusSucceeded
+ } else {
+ p.CurState = pollers.StatusFailed
+ }
+ return p.CurState, nil
+ })
+ if err != nil {
+ return nil, err
}
- p.CurState = resp.StatusCode
- return nil
+ return p.resp, nil
}
-func (*Poller) FinalGetURL() string {
- return ""
-}
-
-func (p *Poller) Status() string {
- if p.CurState == http.StatusAccepted {
- return pollers.StatusInProgress
- } else if p.CurState > 199 && p.CurState < 300 {
- // any 2xx other than a 202 indicates success
- return pollers.StatusSucceeded
- }
- return pollers.StatusFailed
+func (p *Poller[T]) Result(ctx context.Context, out *T) error {
+ return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
index 9795e294656..dd714e768c5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -7,34 +7,48 @@
package op
import (
+ "context"
"errors"
"fmt"
"net/http"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
-// Kind is the identifier of this type in a resume token.
-const Kind = "Operation-Location"
-
// Applicable returns true if the LRO is using Operation-Location.
func Applicable(resp *http.Response) bool {
return resp.Header.Get(shared.HeaderOperationLocation) != ""
}
+// CanResume returns true if the token can rehydrate this poller type.
+func CanResume(token map[string]interface{}) bool {
+ _, ok := token["oplocURL"]
+ return ok
+}
+
// Poller is an LRO poller that uses the Operation-Location pattern.
-type Poller struct {
- Type string `json:"type"`
- PollURL string `json:"pollURL"`
- LocURL string `json:"locURL"`
- FinalGET string `json:"finalGET"`
- CurState string `json:"state"`
+type Poller[T any] struct {
+ pl exported.Pipeline
+ resp *http.Response
+
+ OpLocURL string `json:"oplocURL"`
+ LocURL string `json:"locURL"`
+ OrigURL string `json:"origURL"`
+ Method string `json:"method"`
+ FinalState pollers.FinalStateVia `json:"finalState"`
+ CurState string `json:"state"`
}
// New creates a new Poller from the provided initial response.
-func New(resp *http.Response, pollerID string) (*Poller, error) {
+// Pass nil for response to create an empty Poller for rehydration.
+func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) {
+ if resp == nil {
+ log.Write(log.EventLRO, "Resuming Operation-Location poller.")
+ return &Poller[T]{pl: pl}, nil
+ }
log.Write(log.EventLRO, "Using Operation-Location poller.")
opURL := resp.Header.Get(shared.HeaderOperationLocation)
if opURL == "" {
@@ -51,82 +65,76 @@ func New(resp *http.Response, pollerID string) (*Poller, error) {
// default initial state to InProgress. if the
// service sent us a status then use that instead.
curState := pollers.StatusInProgress
- status, err := getValue(resp, "status")
- if err != nil && !errors.Is(err, shared.ErrNoBody) {
+ status, err := pollers.GetStatus(resp)
+ if err != nil && !errors.Is(err, pollers.ErrNoBody) {
return nil, err
}
if status != "" {
curState = status
}
- // calculate the tentative final GET URL.
- // can change if we receive a resourceLocation.
- // it's ok for it to be empty in some cases.
- finalGET := ""
- if resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut {
- finalGET = resp.Request.URL.String()
- } else if resp.Request.Method == http.MethodPost && locURL != "" {
- finalGET = locURL
- }
- return &Poller{
- Type: pollers.MakeID(pollerID, Kind),
- PollURL: opURL,
- LocURL: locURL,
- FinalGET: finalGET,
- CurState: curState,
- }, nil
-}
-func (p *Poller) URL() string {
- return p.PollURL
+ return &Poller[T]{
+ pl: pl,
+ resp: resp,
+ OpLocURL: opURL,
+ LocURL: locURL,
+ OrigURL: resp.Request.URL.String(),
+ Method: resp.Request.Method,
+ FinalState: finalState,
+ CurState: curState,
+ }, nil
}
-func (p *Poller) Done() bool {
- return pollers.IsTerminalState(p.Status())
+func (p *Poller[T]) Done() bool {
+ return pollers.IsTerminalState(p.CurState)
}
-func (p *Poller) Update(resp *http.Response) error {
- status, err := getValue(resp, "status")
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
+ err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) {
+ state, err := pollers.GetStatus(resp)
+ if err != nil {
+ return "", err
+ } else if state == "" {
+ return "", errors.New("the response did not contain a status")
+ }
+ p.resp = resp
+ p.CurState = state
+ return p.CurState, nil
+ })
if err != nil {
- return err
- } else if status == "" {
- return errors.New("the response did not contain a status")
- }
- p.CurState = status
- // if the endpoint returned an operation-location header, update cached value
- if opLoc := resp.Header.Get(shared.HeaderOperationLocation); opLoc != "" {
- p.PollURL = opLoc
- }
- // check for resourceLocation
- resLoc, err := getValue(resp, "resourceLocation")
- if err != nil && !errors.Is(err, shared.ErrNoBody) {
- return err
- } else if resLoc != "" {
- p.FinalGET = resLoc
+ return nil, err
}
- return nil
-}
-
-func (p *Poller) FinalGetURL() string {
- return p.FinalGET
-}
-
-func (p *Poller) Status() string {
- return p.CurState
+ return p.resp, nil
}
-func getValue(resp *http.Response, val string) (string, error) {
- jsonBody, err := shared.GetJSON(resp)
- if err != nil {
- return "", err
+func (p *Poller[T]) Result(ctx context.Context, out *T) error {
+ var req *exported.Request
+ var err error
+ if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" {
+ req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
+ } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost {
+ // no final GET required, terminal response should have it
+ } else if rl, rlErr := pollers.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, pollers.ErrNoBody) {
+ return rlErr
+ } else if rl != "" {
+ req, err = exported.NewRequest(ctx, http.MethodGet, rl)
+ } else if p.Method == http.MethodPatch || p.Method == http.MethodPut {
+ req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
+ } else if p.Method == http.MethodPost && p.LocURL != "" {
+ req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
}
- v, ok := jsonBody[val]
- if !ok {
- // it might be ok if the field doesn't exist, the caller must make that determination
- return "", nil
+ if err != nil {
+ return err
}
- vv, ok := v.(string)
- if !ok {
- return "", fmt.Errorf("the %s value %v was not in string format", val, v)
+
+ // if a final GET request has been created, execute it
+ if req != nil {
+ resp, err := p.pl.Do(req)
+ if err != nil {
+ return err
+ }
+ p.resp = resp
}
- return vv, nil
+
+ return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go
index a6bccbfb5e0..37ed647f4e0 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go
@@ -1,212 +1,24 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package pollers
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "reflect"
- "time"
+// FinalStateVia is the enumerated type for the possible final-state-via values.
+type FinalStateVia string
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pipeline"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
-)
-
-// KindFromToken extracts the poller kind from the provided token.
-// If the pollerID doesn't match what's in the token an error is returned.
-func KindFromToken(pollerID, token string) (string, error) {
- // unmarshal into JSON object to determine the poller type
- obj := map[string]interface{}{}
- err := json.Unmarshal([]byte(token), &obj)
- if err != nil {
- return "", err
- }
- t, ok := obj["type"]
- if !ok {
- return "", errors.New("missing type field")
- }
- tt, ok := t.(string)
- if !ok {
- return "", fmt.Errorf("invalid type format %T", t)
- }
- ttID, ttKind, err := DecodeID(tt)
- if err != nil {
- return "", err
- }
- // ensure poller types match
- if ttID != pollerID {
- return "", fmt.Errorf("cannot resume from this poller token. expected %s, received %s", pollerID, ttID)
- }
- return ttKind, nil
-}
-
-// PollerType returns the concrete type of the poller (FOR TESTING PURPOSES).
-func PollerType(p *Poller) reflect.Type {
- return reflect.TypeOf(p.lro)
-}
-
-// NewPoller creates a Poller from the specified input.
-func NewPoller(lro Operation, resp *http.Response, pl pipeline.Pipeline) *Poller {
- return &Poller{lro: lro, pl: pl, resp: resp}
-}
+const (
+ // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL.
+ FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation"
-// Poller encapsulates state and logic for polling on long-running operations.
-type Poller struct {
- lro Operation
- pl pipeline.Pipeline
- resp *http.Response
- err error
-}
+ // FinalStateViaLocation indicates the final payload comes from the Location URL.
+ FinalStateViaLocation FinalStateVia = "location"
-// Done returns true if the LRO has reached a terminal state.
-func (l *Poller) Done() bool {
- if l.err != nil {
- return true
- }
- return l.lro.Done()
-}
+ // FinalStateViaOriginalURI indicates the final payload comes from the original URL.
+ FinalStateViaOriginalURI FinalStateVia = "original-uri"
-// Poll sends a polling request to the polling endpoint and returns the response or error.
-func (l *Poller) Poll(ctx context.Context) (*http.Response, error) {
- if l.Done() {
- // the LRO has reached a terminal state, don't poll again
- if l.resp != nil {
- return l.resp, nil
- }
- return nil, l.err
- }
- req, err := pipeline.NewRequest(ctx, http.MethodGet, l.lro.URL())
- if err != nil {
- return nil, err
- }
- resp, err := l.pl.Do(req)
- if err != nil {
- // don't update the poller for failed requests
- return nil, err
- }
- defer resp.Body.Close()
- if !StatusCodeValid(resp) {
- // the LRO failed. unmarshall the error and update state
- l.err = shared.NewResponseError(resp)
- l.resp = nil
- return nil, l.err
- }
- if err = l.lro.Update(resp); err != nil {
- return nil, err
- }
- l.resp = resp
- log.Writef(log.EventLRO, "Status %s", l.lro.Status())
- if Failed(l.lro.Status()) {
- l.err = shared.NewResponseError(resp)
- l.resp = nil
- return nil, l.err
- }
- return l.resp, nil
-}
-
-// ResumeToken returns a token string that can be used to resume a poller that has not yet reached a terminal state.
-func (l *Poller) ResumeToken() (string, error) {
- if l.Done() {
- return "", errors.New("cannot create a ResumeToken from a poller in a terminal state")
- }
- b, err := json.Marshal(l.lro)
- if err != nil {
- return "", err
- }
- return string(b), nil
-}
-
-// FinalResponse will perform a final GET request and return the final HTTP response for the polling
-// operation and unmarshall the content of the payload into the respType interface that is provided.
-func (l *Poller) FinalResponse(ctx context.Context, respType interface{}) (*http.Response, error) {
- if !l.Done() {
- return nil, errors.New("cannot return a final response from a poller in a non-terminal state")
- }
- // update l.resp with the content from final GET if applicable
- if u := l.lro.FinalGetURL(); u != "" {
- log.Write(log.EventLRO, "Performing final GET.")
- req, err := pipeline.NewRequest(ctx, http.MethodGet, u)
- if err != nil {
- return nil, err
- }
- resp, err := l.pl.Do(req)
- if err != nil {
- return nil, err
- }
- if !StatusCodeValid(resp) {
- return nil, shared.NewResponseError(resp)
- }
- l.resp = resp
- }
- // if there's nothing to unmarshall into or no response body just return the final response
- if respType == nil {
- return l.resp, nil
- } else if l.resp.StatusCode == http.StatusNoContent || l.resp.ContentLength == 0 {
- log.Write(log.EventLRO, "final response specifies a response type but no payload was received")
- return l.resp, nil
- }
- body, err := shared.Payload(l.resp)
- if err != nil {
- return nil, err
- }
- if err = json.Unmarshal(body, respType); err != nil {
- return nil, err
- }
- return l.resp, nil
-}
-
-// PollUntilDone will handle the entire span of the polling operation until a terminal state is reached,
-// then return the final HTTP response for the polling operation and unmarshal the content of the payload
-// into the respType interface that is provided.
-// freq - the time to wait between intervals in absence of a Retry-After header. Minimum is one second.
-func (l *Poller) PollUntilDone(ctx context.Context, freq time.Duration, respType interface{}) (*http.Response, error) {
- if freq < time.Second {
- return nil, errors.New("polling frequency minimum is one second")
- }
- start := time.Now()
- logPollUntilDoneExit := func(v interface{}) {
- log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", l.lro, v, time.Since(start))
- }
- log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", l.lro)
- if l.resp != nil {
- // initial check for a retry-after header existing on the initial response
- if retryAfter := shared.RetryAfter(l.resp); retryAfter > 0 {
- log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String())
- if err := shared.Delay(ctx, retryAfter); err != nil {
- logPollUntilDoneExit(err)
- return nil, err
- }
- }
- }
- // begin polling the endpoint until a terminal state is reached
- for {
- resp, err := l.Poll(ctx)
- if err != nil {
- logPollUntilDoneExit(err)
- return nil, err
- }
- if l.Done() {
- logPollUntilDoneExit(l.lro.Status())
- return l.FinalResponse(ctx, respType)
- }
- d := freq
- if retryAfter := shared.RetryAfter(resp); retryAfter > 0 {
- log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String())
- d = retryAfter
- } else {
- log.Writef(log.EventLRO, "delay for %s", d.String())
- }
- if err = shared.Delay(ctx, d); err != nil {
- logPollUntilDoneExit(err)
- return nil, err
- }
- }
-}
+ // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL.
+ FinalStateViaOpLocation FinalStateVia = "operation-location"
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
index dca70b5a596..17ab7dadc3f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -7,14 +7,21 @@
package pollers
import (
+ "context"
+ "encoding/json"
+ "errors"
"fmt"
"net/http"
"net/url"
+ "reflect"
"strings"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
+// the well-known set of LRO status/provisioning state values.
const (
StatusSucceeded = "Succeeded"
StatusCanceled = "Canceled"
@@ -22,15 +29,6 @@ const (
StatusInProgress = "InProgress"
)
-// Operation abstracts the differences between concrete poller types.
-type Operation interface {
- Done() bool
- Update(resp *http.Response) error
- FinalGetURL() string
- URL() string
- Status() string
-}
-
// IsTerminalState returns true if the LRO's state is terminal.
func IsTerminalState(s string) bool {
return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled)
@@ -41,9 +39,14 @@ func Failed(s string) bool {
return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled)
}
+// Succeeded returns true if the LRO's state is terminal success.
+func Succeeded(s string) bool {
+ return strings.EqualFold(s, StatusSucceeded)
+}
+
// returns true if the LRO response contains a valid HTTP status code
func StatusCodeValid(resp *http.Response) bool {
- return shared.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent)
+ return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent)
}
// IsValidURL verifies that the URL is valid and absolute.
@@ -52,48 +55,263 @@ func IsValidURL(s string) bool {
return err == nil && u.IsAbs()
}
-const idSeparator = ";"
+// getTokenTypeName creates a type name from the type parameter T.
+func getTokenTypeName[T any]() (string, error) {
+ tt := shared.TypeOfT[T]()
+ var n string
+ if tt.Kind() == reflect.Pointer {
+ n = "*"
+ tt = tt.Elem()
+ }
+ n += tt.Name()
+ if n == "" {
+ return "", errors.New("nameless types are not allowed")
+ }
+ return n, nil
+}
+
+type resumeTokenWrapper[T any] struct {
+ Type string `json:"type"`
+ Token T `json:"token"`
+}
+
+// NewResumeToken creates a resume token from the specified type.
+// An error is returned if the generic type has no name (e.g. struct{}).
+func NewResumeToken[TResult, TSource any](from TSource) (string, error) {
+ n, err := getTokenTypeName[TResult]()
+ if err != nil {
+ return "", err
+ }
+ b, err := json.Marshal(resumeTokenWrapper[TSource]{
+ Type: n,
+ Token: from,
+ })
+ if err != nil {
+ return "", err
+ }
+ return string(b), nil
+}
+
+// ExtractToken returns the poller-specific token information from the provided token value.
+func ExtractToken(token string) ([]byte, error) {
+ raw := map[string]json.RawMessage{}
+ if err := json.Unmarshal([]byte(token), &raw); err != nil {
+ return nil, err
+ }
+ // this is dependent on the type resumeTokenWrapper[T]
+ tk, ok := raw["token"]
+ if !ok {
+ return nil, errors.New("missing token value")
+ }
+ return tk, nil
+}
+
+// IsTokenValid returns an error if the specified token isn't applicable for generic type T.
+func IsTokenValid[T any](token string) error {
+ raw := map[string]interface{}{}
+ if err := json.Unmarshal([]byte(token), &raw); err != nil {
+ return err
+ }
+ t, ok := raw["type"]
+ if !ok {
+ return errors.New("missing type value")
+ }
+ tt, ok := t.(string)
+ if !ok {
+ return fmt.Errorf("invalid type format %T", t)
+ }
+ n, err := getTokenTypeName[T]()
+ if err != nil {
+ return err
+ }
+ if tt != n {
+ return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tt, n)
+ }
+ return nil
+}
+
+// ErrNoBody is returned if the response didn't contain a body.
+var ErrNoBody = errors.New("the response did not contain a body")
+
+// GetJSON reads the response body into a raw JSON object.
+// It returns ErrNoBody if there was no content.
+func GetJSON(resp *http.Response) (map[string]interface{}, error) {
+ body, err := exported.Payload(resp)
+ if err != nil {
+ return nil, err
+ }
+ if len(body) == 0 {
+ return nil, ErrNoBody
+ }
+ // unmarshall the body to get the value
+ var jsonBody map[string]interface{}
+ if err = json.Unmarshal(body, &jsonBody); err != nil {
+ return nil, err
+ }
+ return jsonBody, nil
+}
+
+// provisioningState returns the provisioning state from the response or the empty string.
+func provisioningState(jsonBody map[string]interface{}) string {
+ jsonProps, ok := jsonBody["properties"]
+ if !ok {
+ return ""
+ }
+ props, ok := jsonProps.(map[string]interface{})
+ if !ok {
+ return ""
+ }
+ rawPs, ok := props["provisioningState"]
+ if !ok {
+ return ""
+ }
+ ps, ok := rawPs.(string)
+ if !ok {
+ return ""
+ }
+ return ps
+}
+
+// status returns the status from the response or the empty string.
+func status(jsonBody map[string]interface{}) string {
+ rawStatus, ok := jsonBody["status"]
+ if !ok {
+ return ""
+ }
+ status, ok := rawStatus.(string)
+ if !ok {
+ return ""
+ }
+ return status
+}
-// MakeID returns the poller ID from the provided values.
-func MakeID(pollerID string, kind string) string {
- return fmt.Sprintf("%s%s%s", pollerID, idSeparator, kind)
+// GetStatus returns the LRO's status from the response body.
+// Typically used for Azure-AsyncOperation flows.
+// If there is no status in the response body the empty string is returned.
+func GetStatus(resp *http.Response) (string, error) {
+ jsonBody, err := GetJSON(resp)
+ if err != nil {
+ return "", err
+ }
+ return status(jsonBody), nil
}
-// DecodeID decodes the poller ID, returning [pollerID, kind] or an error.
-func DecodeID(tk string) (string, string, error) {
- raw := strings.Split(tk, idSeparator)
- // strings.Split will include any/all whitespace strings, we want to omit those
- parts := []string{}
- for _, r := range raw {
- if s := strings.TrimSpace(r); s != "" {
- parts = append(parts, s)
- }
+// GetProvisioningState returns the LRO's state from the response body.
+// If there is no state in the response body the empty string is returned.
+func GetProvisioningState(resp *http.Response) (string, error) {
+ jsonBody, err := GetJSON(resp)
+ if err != nil {
+ return "", err
}
- if len(parts) != 2 {
- return "", "", fmt.Errorf("invalid token %s", tk)
+ return provisioningState(jsonBody), nil
+}
+
+// GetResourceLocation returns the LRO's resourceLocation value from the response body.
+// Typically used for Operation-Location flows.
+// If there is no resourceLocation in the response body the empty string is returned.
+func GetResourceLocation(resp *http.Response) (string, error) {
+ jsonBody, err := GetJSON(resp)
+ if err != nil {
+ return "", err
+ }
+ v, ok := jsonBody["resourceLocation"]
+ if !ok {
+ // it might be ok if the field doesn't exist, the caller must make that determination
+ return "", nil
+ }
+ vv, ok := v.(string)
+ if !ok {
+ return "", fmt.Errorf("the resourceLocation value %v was not in string format", v)
}
- return parts[0], parts[1], nil
+ return vv, nil
}
// used if the operation synchronously completed
-type NopPoller struct{}
+type NopPoller[T any] struct {
+ resp *http.Response
+ result T
+}
-func (*NopPoller) URL() string {
- return ""
+// NewNopPoller creates a NopPoller from the provided response.
+// It unmarshals the response body into an instance of T.
+func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) {
+ np := &NopPoller[T]{resp: resp}
+ if resp.StatusCode == http.StatusNoContent {
+ return np, nil
+ }
+ payload, err := exported.Payload(resp)
+ if err != nil {
+ return nil, err
+ }
+ if len(payload) == 0 {
+ return np, nil
+ }
+ if err = json.Unmarshal(payload, &np.result); err != nil {
+ return nil, err
+ }
+ return np, nil
}
-func (*NopPoller) Done() bool {
+func (*NopPoller[T]) Done() bool {
return true
}
-func (*NopPoller) Update(*http.Response) error {
+func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) {
+ return p.resp, nil
+}
+
+func (p *NopPoller[T]) Result(ctx context.Context, out *T) error {
+ *out = p.result
return nil
}
-func (*NopPoller) FinalGetURL() string {
- return ""
+// PollHelper creates and executes the request, calling update() with the response.
+// If the request fails, the update func is not called.
+// The update func returns the state of the operation for logging purposes or an error
+// if it fails to extract the required state from the response.
+func PollHelper(ctx context.Context, endpoint string, pl exported.Pipeline, update func(resp *http.Response) (string, error)) error {
+ req, err := exported.NewRequest(ctx, http.MethodGet, endpoint)
+ if err != nil {
+ return err
+ }
+ resp, err := pl.Do(req)
+ if err != nil {
+ return err
+ }
+ state, err := update(resp)
+ if err != nil {
+ return err
+ }
+ log.Writef(log.EventLRO, "State %s", state)
+ return nil
}
-func (*NopPoller) Status() string {
- return StatusSucceeded
+// ResultHelper processes the response as success or failure.
+// In the success case, it unmarshals the payload into either a new instance of T or out.
+// In the failure case, it creates an *azcore.Response error from the response.
+func ResultHelper[T any](resp *http.Response, failed bool, out *T) error {
+ // short-circuit the simple success case with no response body to unmarshal
+ if resp.StatusCode == http.StatusNoContent {
+ return nil
+ }
+
+ defer resp.Body.Close()
+ if !StatusCodeValid(resp) || failed {
+ // the LRO failed. unmarshall the error and update state
+ return exported.NewResponseError(resp)
+ }
+
+ // success case
+ payload, err := exported.Payload(resp)
+ if err != nil {
+ return err
+ }
+ if len(payload) == 0 {
+ return nil
+ }
+
+ if err = json.Unmarshal(payload, out); err != nil {
+ return err
+ }
+ return nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
index 54b04f68b07..5b88e56dfaa 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -21,11 +21,6 @@ const (
HeaderOperationLocation = "Operation-Location"
HeaderRetryAfter = "Retry-After"
HeaderUserAgent = "User-Agent"
- HeaderXmsDate = "x-ms-date"
-)
-
-const (
- DefaultMaxRetries = 3
)
const BearerTokenPrefix = "Bearer "
@@ -35,5 +30,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
- Version = "v0.21.1"
+ Version = "v1.1.3"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
index 2ee59f52a7d..96eef2956ff 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -8,56 +8,22 @@ package shared
import (
"context"
- "encoding/json"
"errors"
"io"
- "io/ioutil"
"net/http"
- "net/url"
+ "reflect"
"strconv"
- "strings"
"time"
)
-// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token.
-type TokenRequestOptions struct {
- // Scopes contains the list of permission scopes required for the token.
- Scopes []string
- // TenantID contains the tenant ID to use in a multi-tenant authentication scenario, if TenantID is set
- // it will override the tenant ID that was added at credential creation time.
- TenantID string
-}
-
-// TokenCredential represents a credential capable of providing an OAuth token.
-type TokenCredential interface {
- // GetToken requests an access token for the specified set of scopes.
- GetToken(ctx context.Context, options TokenRequestOptions) (*AccessToken, error)
-}
-
-// AccessToken represents an Azure service bearer access token with expiry information.
-type AccessToken struct {
- Token string
- ExpiresOn time.Time
-}
-
// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header.
type CtxWithHTTPHeaderKey struct{}
// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions.
type CtxWithRetryOptionsKey struct{}
-type nopCloser struct {
- io.ReadSeeker
-}
-
-func (n nopCloser) Close() error {
- return nil
-}
-
-// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
-func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
- return nopCloser{rs}
-}
+// CtxIncludeResponseKey is used as a context key for retrieving the raw response.
+type CtxIncludeResponseKey struct{}
// Delay waits for the duration to elapse or the context to be cancelled.
func Delay(ctx context.Context, delay time.Duration) error {
@@ -69,27 +35,6 @@ func Delay(ctx context.Context, delay time.Duration) error {
}
}
-// ErrNoBody is returned if the response didn't contain a body.
-var ErrNoBody = errors.New("the response did not contain a body")
-
-// GetJSON reads the response body into a raw JSON object.
-// It returns ErrNoBody if there was no content.
-func GetJSON(resp *http.Response) (map[string]interface{}, error) {
- body, err := Payload(resp)
- if err != nil {
- return nil, err
- }
- if len(body) == 0 {
- return nil, ErrNoBody
- }
- // unmarshall the body to get the value
- var jsonBody map[string]interface{}
- if err = json.Unmarshal(body, &jsonBody); err != nil {
- return nil, err
- }
- return jsonBody, nil
-}
-
// RetryAfter returns non-zero if the response contains a Retry-After header value.
func RetryAfter(resp *http.Response) time.Duration {
if resp == nil {
@@ -109,34 +54,21 @@ func RetryAfter(resp *http.Response) time.Duration {
return 0
}
-// HasStatusCode returns true if the Response's status code is one of the specified values.
-func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
- if resp == nil {
- return false
- }
- for _, sc := range statusCodes {
- if resp.StatusCode == sc {
- return true
- }
- }
- return false
+// TypeOfT returns the type of the generic type param.
+func TypeOfT[T any]() reflect.Type {
+ // you can't, at present, obtain the type of
+ // a type parameter, so this is the trick
+ return reflect.TypeOf((*T)(nil)).Elem()
}
-// Payload reads and returns the response body or an error.
-// On a successful read, the response body is cached.
-// Subsequent reads will access the cached value.
-func Payload(resp *http.Response) ([]byte, error) {
- // r.Body won't be a nopClosingBytesReader if downloading was skipped
- if buf, ok := resp.Body.(*NopClosingBytesReader); ok {
- return buf.Bytes(), nil
- }
- bytesBody, err := ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- if err != nil {
- return nil, err
- }
- resp.Body = &NopClosingBytesReader{s: bytesBody, i: 0}
- return bytesBody, nil
+// BytesSetter abstracts replacing a byte slice on some type.
+type BytesSetter interface {
+ Set(b []byte)
+}
+
+// NewNopClosingBytesReader creates a new *NopClosingBytesReader for the specified slice.
+func NewNopClosingBytesReader(data []byte) *NopClosingBytesReader {
+ return &NopClosingBytesReader{s: data}
}
// NopClosingBytesReader is an io.ReadSeekCloser around a byte slice.
@@ -146,11 +78,6 @@ type NopClosingBytesReader struct {
i int64
}
-// NewNopClosingBytesReader creates a new NopClosingBytesReader around the specified byte slice.
-func NewNopClosingBytesReader(data []byte) *NopClosingBytesReader {
- return &NopClosingBytesReader{s: data}
-}
-
// Bytes returns the underlying byte slice.
func (r *NopClosingBytesReader) Bytes() []byte {
return r.s
@@ -197,28 +124,12 @@ func (r *NopClosingBytesReader) Seek(offset int64, whence int) (int64, error) {
return i, nil
}
-const defaultScope = "/.default"
-const chinaCloudARMScope = "https://management.core.chinacloudapi.cn/" + defaultScope
-const publicCloudARMScope = "https://management.core.windows.net/" + defaultScope
-const usGovCloudARMScope = "https://management.core.usgovcloudapi.net/" + defaultScope
-
-// EndpointToScope converts the provided URL endpoint to its default scope.
-func EndpointToScope(endpoint string) string {
- parsed, err := url.Parse(endpoint)
- if err == nil {
- host := parsed.Hostname()
- switch {
- case strings.HasSuffix(host, "management.azure.com"):
- return publicCloudARMScope
- case strings.HasSuffix(host, "management.usgovcloudapi.net"):
- return usGovCloudARMScope
- case strings.HasSuffix(host, "management.chinacloudapi.cn"):
- return chinaCloudARMScope
- }
- }
- // fall back to legacy behavior when endpoint doesn't parse or match a known cloud's ARM endpoint
- if endpoint[len(endpoint)-1] != '/' {
- endpoint += "/"
- }
- return endpoint + defaultScope
+var _ BytesSetter = (*NopClosingBytesReader)(nil)
+
+// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface.
+type TransportFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Transporter interface for the TransportFunc type.
+func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) {
+ return pf(req)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go
new file mode 100644
index 00000000000..2f3901bff3c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go
@@ -0,0 +1,10 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+// Package log contains functionality for configuring logging behavior.
+// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all".
+package log
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go
new file mode 100644
index 00000000000..7bde29d0a46
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go
@@ -0,0 +1,50 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+// Package log provides functionality for configuring logging facilities.
+package log
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+// Event is used to group entries. Each group can be toggled on or off.
+type Event = log.Event
+
+const (
+ // EventRequest entries contain information about HTTP requests.
+ // This includes information like the URL, query parameters, and headers.
+ EventRequest Event = "Request"
+
+ // EventResponse entries contain information about HTTP responses.
+ // This includes information like the HTTP status code, headers, and request URL.
+ EventResponse Event = "Response"
+
+ // EventRetryPolicy entries contain information specific to the retry policy in use.
+ EventRetryPolicy Event = "Retry"
+
+ // EventLRO entries contain information specific to long-running operations.
+ // This includes information like polling location, operation state, and sleep intervals.
+ EventLRO Event = "LongRunningOperation"
+)
+
+// SetEvents is used to control which events are written to
+// the log. By default all log events are written.
+// NOTE: this is not goroutine safe and should be called before using SDK clients.
+func SetEvents(cls ...Event) {
+ log.SetEvents(cls...)
+}
+
+// SetListener will set the Logger to write to the specified Listener.
+// NOTE: this is not goroutine safe and should be called before using SDK clients.
+func SetListener(lst func(Event, string)) {
+ log.SetListener(lst)
+}
+
+// for testing purposes
+func resetEvents() {
+ log.TestResetEvents()
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go
index 572c7f119b8..fad2579ed6c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
index d9b948a77e6..1ba320aef31 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -7,28 +7,29 @@
package policy
import (
- "context"
- "net/http"
"time"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pipeline"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
)
// Policy represents an extensibility point for the Pipeline that can mutate the specified
// Request and react to the received Response.
-type Policy = pipeline.Policy
+type Policy = exported.Policy
// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses.
-type Transporter = pipeline.Transporter
+type Transporter = exported.Transporter
// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline.
// Don't use this type directly, use runtime.NewRequest() instead.
-type Request = pipeline.Request
+type Request = exported.Request
// ClientOptions contains optional settings for a client's pipeline.
// All zero-value fields will be initialized with default values.
type ClientOptions struct {
+ // Cloud specifies a cloud for the client. The default is Azure Public Cloud.
+ Cloud cloud.Configuration
+
// Logging configures the built-in logging policy.
Logging LogOptions
@@ -68,7 +69,8 @@ type LogOptions struct {
}
// RetryOptions configures the retry policy's behavior.
-// Call NewRetryOptions() to create an instance with default values.
+// Zero-value fields will have their specified default values applied during use.
+// This allows for modification of a subset of fields.
type RetryOptions struct {
// MaxRetries specifies the maximum number of attempts a failed operation will be retried
// before producing an error.
@@ -81,6 +83,7 @@ type RetryOptions struct {
TryTimeout time.Duration
// RetryDelay specifies the initial amount of delay to use before retrying an operation.
+ // The value is used only if the HTTP response does not contain a Retry-After header.
// The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay.
// The default value is four seconds. A value less than zero means no delay between retries.
RetryDelay time.Duration
@@ -91,8 +94,15 @@ type RetryOptions struct {
MaxRetryDelay time.Duration
// StatusCodes specifies the HTTP status codes that indicate the operation should be retried.
- // The default value is the status codes in StatusCodesForRetry.
- // Specifying an empty slice will cause retries to happen only for transport errors.
+ // A nil slice will use the following values.
+ // http.StatusRequestTimeout 408
+ // http.StatusTooManyRequests 429
+ // http.StatusInternalServerError 500
+ // http.StatusBadGateway 502
+ // http.StatusServiceUnavailable 503
+ // http.StatusGatewayTimeout 504
+ // Specifying values will replace the default values.
+ // Specifying an empty slice will disable retries for HTTP status codes.
StatusCodes []int
}
@@ -107,22 +117,12 @@ type TelemetryOptions struct {
}
// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token.
-type TokenRequestOptions = shared.TokenRequestOptions
+type TokenRequestOptions struct {
+ // Scopes contains the list of permission scopes required for the token.
+ Scopes []string
+}
// BearerTokenOptions configures the bearer token policy's behavior.
type BearerTokenOptions struct {
// placeholder for future options
}
-
-// WithHTTPHeader adds the specified http.Header to the parent context.
-// Use this to specify custom HTTP headers at the API-call level.
-// Any overlapping headers will have their values replaced with the values specified here.
-func WithHTTPHeader(parent context.Context, header http.Header) context.Context {
- return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header)
-}
-
-// WithRetryOptions adds the specified RetryOptions to the parent context.
-// Use this to specify custom RetryOptions at the API-call level.
-func WithRetryOptions(parent context.Context, options RetryOptions) context.Context {
- return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go
index d3f5408def3..c9cfa438cb3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go
index 0b19b5c74dc..6d03b291ebf 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -9,11 +9,11 @@ package runtime
import (
"net/http"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
)
// NewResponseError creates an *azcore.ResponseError from the provided HTTP response.
// Call this when a service request returns a non-successful status code.
func NewResponseError(resp *http.Response) error {
- return shared.NewResponseError(resp)
+ return exported.NewResponseError(resp)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
new file mode 100644
index 00000000000..5507665d651
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
@@ -0,0 +1,77 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+)
+
+// PagingHandler contains the required data for constructing a Pager.
+type PagingHandler[T any] struct {
+ // More returns a boolean indicating if there are more pages to fetch.
+ // It uses the provided page to make the determination.
+ More func(T) bool
+
+ // Fetcher fetches the first and subsequent pages.
+ Fetcher func(context.Context, *T) (T, error)
+}
+
+// Pager provides operations for iterating over paged responses.
+type Pager[T any] struct {
+ current *T
+ handler PagingHandler[T]
+ firstPage bool
+}
+
+// NewPager creates an instance of Pager using the specified PagingHandler.
+// The returned Pager fetches its first page on the initial call to NextPage.
+func NewPager[T any](handler PagingHandler[T]) *Pager[T] {
+ return &Pager[T]{
+ handler: handler,
+ firstPage: true,
+ }
+}
+
+// More returns true if there are more pages to retrieve.
+func (p *Pager[T]) More() bool {
+ if p.current != nil {
+ return p.handler.More(*p.current)
+ }
+ return true
+}
+
+// NextPage advances the pager to the next page.
+func (p *Pager[T]) NextPage(ctx context.Context) (T, error) {
+ var resp T
+ var err error
+ if p.current != nil {
+ if p.firstPage {
+ // we get here if it's an LRO-pager, we already have the first page
+ p.firstPage = false
+ return *p.current, nil
+ } else if !p.handler.More(*p.current) {
+ return *new(T), errors.New("no more pages")
+ }
+ resp, err = p.handler.Fetcher(ctx, p.current)
+ } else {
+ // non-LRO case, first page
+ p.firstPage = false
+ resp, err = p.handler.Fetcher(ctx, nil)
+ }
+ if err != nil {
+ return *new(T), err
+ }
+ p.current = &resp
+ return *p.current, nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T].
+func (p *Pager[T]) UnmarshalJSON(data []byte) error {
+ return json.Unmarshal(data, &p.current)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go
index 509d5fc503a..ad75ae2ab24 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -7,7 +7,9 @@
package runtime
import (
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pipeline"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
@@ -17,10 +19,13 @@ type PipelineOptions struct {
PerCall, PerRetry []policy.Policy
}
+// Pipeline represents a primitive for sending HTTP requests and receiving responses.
+// Its behavior can be extended by specifying policies during construction.
+type Pipeline = exported.Pipeline
+
// NewPipeline creates a pipeline from connection options, with any additional policies as specified.
-// module, version: used by the telemetry policy, when enabled
-// perCall: additional policies to invoke once per request
-// perRetry: additional policies to invoke once per request and once per retry of that request
+// Policies from ClientOptions are placed after policies from PipelineOptions.
+// The module and version parameters are used by the telemetry policy, when enabled.
func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline {
cp := policy.ClientOptions{}
if options != nil {
@@ -38,20 +43,31 @@ func NewPipeline(module, version string, plOpts PipelineOptions, options *policy
qp = append(qp, cp.Logging.AllowedQueryParams...)
cp.Logging.AllowedQueryParams = qp
}
- policies := []policy.Policy{}
+ // we put the includeResponsePolicy at the very beginning so that the raw response
+ // is populated with the final response (some policies might mutate the response)
+ policies := []policy.Policy{policyFunc(includeResponsePolicy)}
if !cp.Telemetry.Disabled {
policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry))
}
- policies = append(policies, cp.PerCallPolicies...)
policies = append(policies, plOpts.PerCall...)
+ policies = append(policies, cp.PerCallPolicies...)
policies = append(policies, NewRetryPolicy(&cp.Retry))
- policies = append(policies, cp.PerRetryPolicies...)
policies = append(policies, plOpts.PerRetry...)
+ policies = append(policies, cp.PerRetryPolicies...)
policies = append(policies, NewLogPolicy(&cp.Logging))
- policies = append(policies, pipeline.PolicyFunc(httpHeaderPolicy), pipeline.PolicyFunc(bodyDownloadPolicy))
+ policies = append(policies, policyFunc(httpHeaderPolicy), policyFunc(bodyDownloadPolicy))
transport := cp.Transport
if transport == nil {
transport = defaultHTTPClient
}
- return pipeline.NewPipeline(transport, policies...)
+ return exported.NewPipeline(transport, policies...)
+}
+
+// policyFunc is a type that implements the Policy interface.
+// Use this type when implementing a stateless policy as a first-class function.
+type policyFunc func(*policy.Request) (*http.Response, error)
+
+// Do implements the Policy interface on policyFunc.
+func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
+ return pf(req)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
index 187642d7fbf..71e3062be0b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
@@ -7,16 +7,18 @@ import (
"net/http"
"time"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
)
// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential.
type BearerTokenPolicy struct {
// mainResource is the resource to be retreived using the tenant specified in the credential
- mainResource *shared.ExpiringResource
+ mainResource *temporal.Resource[azcore.AccessToken, acquiringResourceState]
// the following fields are read-only
- cred shared.TokenCredential
+ cred azcore.TokenCredential
scopes []string
}
@@ -27,11 +29,10 @@ type acquiringResourceState struct {
// acquire acquires or updates the resource; only one
// thread/goroutine at a time ever calls this function
-func acquire(state interface{}) (newResource interface{}, newExpiration time.Time, err error) {
- s := state.(acquiringResourceState)
- tk, err := s.p.cred.GetToken(s.req.Raw().Context(), shared.TokenRequestOptions{Scopes: s.p.scopes})
+func acquire(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) {
+ tk, err := state.p.cred.GetToken(state.req.Raw().Context(), policy.TokenRequestOptions{Scopes: state.p.scopes})
if err != nil {
- return nil, time.Time{}, err
+ return azcore.AccessToken{}, time.Time{}, err
}
return tk, tk.ExpiresOn, nil
}
@@ -40,11 +41,11 @@ func acquire(state interface{}) (newResource interface{}, newExpiration time.Tim
// cred: an azcore.TokenCredential implementation such as a credential object from azidentity
// scopes: the list of permission scopes required for the token.
// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options.
-func NewBearerTokenPolicy(cred shared.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy {
+func NewBearerTokenPolicy(cred azcore.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy {
return &BearerTokenPolicy{
cred: cred,
scopes: scopes,
- mainResource: shared.NewExpiringResource(acquire),
+ mainResource: temporal.NewResource(acquire),
}
}
@@ -54,12 +55,10 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
p: b,
req: req,
}
- tk, err := b.mainResource.GetResource(as)
+ tk, err := b.mainResource.Get(as)
if err != nil {
return nil, err
}
- if token, ok := tk.(*shared.AccessToken); ok {
- req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+token.Token)
- }
+ req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token)
return req.Next()
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go
index aa974e5f9d8..02d621ee89e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -11,7 +11,7 @@ import (
"net/http"
"strings"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
)
@@ -29,7 +29,7 @@ func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) {
}
// Either bodyDownloadPolicyOpValues was not specified (so skip is false)
// or it was specified and skip is false: don't skip downloading the body
- _, err = shared.Payload(resp)
+ _, err = exported.Payload(resp)
if err != nil {
return resp, newBodyDownloadError(err, req)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go
index 148c6d9a313..770e0a2b6a6 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -7,6 +7,7 @@
package runtime
import (
+ "context"
"net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
@@ -29,3 +30,10 @@ func httpHeaderPolicy(req *policy.Request) (*http.Response, error) {
}
return req.Next()
}
+
+// WithHTTPHeader adds the specified http.Header to the parent context.
+// Use this to specify custom HTTP headers at the API-call level.
+// Any overlapping headers will have their values replaced with the values specified here.
+func WithHTTPHeader(parent context.Context, header http.Header) context.Context {
+ return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go
new file mode 100644
index 00000000000..4714baa30cd
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go
@@ -0,0 +1,34 @@
+//go:build go1.16
+// +build go1.16
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package runtime
+
+import (
+ "context"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+)
+
+// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request
+func includeResponsePolicy(req *policy.Request) (*http.Response, error) {
+ resp, err := req.Next()
+ if resp == nil {
+ return resp, err
+ }
+ if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil {
+ httpOut := httpOutRaw.(**http.Response)
+ *httpOut = resp
+ }
+ return resp, err
+}
+
+// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context.
+// The resp parameter will contain the HTTP response after the request has completed.
+func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context {
+ return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
index 7d8f2d5d7a7..30a02a7a41b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -9,16 +9,16 @@ package runtime
import (
"bytes"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"sort"
"strings"
"time"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/diag"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
type logPolicy struct {
@@ -56,6 +56,7 @@ func NewLogPolicy(o *policy.LogOptions) policy.Policy {
"traceparent": {},
"transfer-encoding": {},
"user-agent": {},
+ "www-authenticate": {},
"x-ms-request-id": {},
"x-ms-client-request-id": {},
"x-ms-return-client-request-id": {},
@@ -209,7 +210,7 @@ func writeReqBody(req *policy.Request, b *bytes.Buffer) error {
if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) {
return nil
}
- body, err := ioutil.ReadAll(req.Raw().Body)
+ body, err := io.ReadAll(req.Raw().Body)
if err != nil {
fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error())
return err
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go
index daefb970980..db70955b28b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -13,13 +13,14 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
-type requestIdPolicy struct{}
+type requestIDPolicy struct{}
-func NewRequestIdPolicy() policy.Policy {
- return &requestIdPolicy{}
+// NewRequestIDPolicy returns a policy that add the x-ms-client-request-id header
+func NewRequestIDPolicy() policy.Policy {
+ return &requestIDPolicy{}
}
-func (r *requestIdPolicy) Do(req *policy.Request) (*http.Response, error) {
+func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) {
const requestIdHeader = "x-ms-client-request-id"
if req.Raw().Header.Get(requestIdHeader) == "" {
id, err := uuid.New()
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
index 3bd1c267357..66247a599d2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -15,15 +15,19 @@ import (
"net/http"
"time"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+const (
+ defaultMaxRetries = 3
)
func setDefaults(o *policy.RetryOptions) {
if o.MaxRetries == 0 {
- o.MaxRetries = shared.DefaultMaxRetries
+ o.MaxRetries = defaultMaxRetries
} else if o.MaxRetries < 0 {
o.MaxRetries = 0
}
@@ -34,11 +38,12 @@ func setDefaults(o *policy.RetryOptions) {
o.MaxRetryDelay = math.MaxInt64
}
if o.RetryDelay == 0 {
- o.RetryDelay = 4 * time.Second
+ o.RetryDelay = 800 * time.Millisecond
} else if o.RetryDelay < 0 {
o.RetryDelay = 0
}
if o.StatusCodes == nil {
+ // NOTE: if you change this list, you MUST update the docs in policy/policy.go
o.StatusCodes = []int{
http.StatusRequestTimeout, // 408
http.StatusTooManyRequests, // 429
@@ -123,7 +128,15 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout)
clone := req.Clone(tryCtx)
resp, err = clone.Next() // Make the request
- tryCancel()
+ // if the body was already downloaded or there was an error it's safe to cancel the context now
+ if err != nil {
+ tryCancel()
+ } else if _, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
+ tryCancel()
+ } else {
+ // must cancel the context after the body has been read and closed
+ resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body}
+ }
}
if err == nil {
log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode)
@@ -175,6 +188,12 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
}
}
+// WithRetryOptions adds the specified RetryOptions to the parent context.
+// Use this to specify custom RetryOptions at the API-call level.
+func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context {
+ return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options)
+}
+
// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
// This struct is used when sending a body to the network
@@ -203,3 +222,22 @@ func (b *retryableRequestBody) realClose() error {
}
return nil
}
+
+// ********** The following type/methods implement the contextCancelReadCloser
+
+// contextCancelReadCloser combines an io.ReadCloser with a cancel func.
+// it ensures the cancel func is invoked once the body has been read and closed.
+type contextCancelReadCloser struct {
+ cf context.CancelFunc
+ body io.ReadCloser
+}
+
+func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
+ return rc.body.Read(p)
+}
+
+func (rc *contextCancelReadCloser) Close() error {
+ err := rc.body.Close()
+ rc.cf()
+ return err
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go
index 5e628e7a325..2abcdc576b6 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -45,9 +45,6 @@ func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Poli
}
b.WriteString(formatTelemetry(mod, ver))
b.WriteRune(' ')
- // inject azcore info
- b.WriteString(formatTelemetry(shared.Module, shared.Version))
- b.WriteRune(' ')
b.WriteString(platformInfo)
tp.telemetryValue = b.String()
return &tp
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
index fbb364cdf9e..14c90fecfe5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -7,65 +7,320 @@
package runtime
import (
+ "context"
"encoding/json"
"errors"
+ "flag"
"fmt"
"net/http"
+ "time"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pipeline"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
+// FinalStateVia is the enumerated type for the possible final-state-via values.
+type FinalStateVia = pollers.FinalStateVia
+
+const (
+ // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL.
+ FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp
+
+ // FinalStateViaLocation indicates the final payload comes from the Location URL.
+ FinalStateViaLocation = pollers.FinalStateViaLocation
+
+ // FinalStateViaOriginalURI indicates the final payload comes from the original URL.
+ FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI
+
+ // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL.
+ FinalStateViaOpLocation = pollers.FinalStateViaOpLocation
+)
+
+// NewPollerOptions contains the optional parameters for NewPoller.
+type NewPollerOptions[T any] struct {
+ // FinalStateVia contains the final-state-via value for the LRO.
+ FinalStateVia FinalStateVia
+
+ // Response contains a preconstructed response type.
+ // The final payload will be unmarshaled into it and returned.
+ Response *T
+
+ // Handler[T] contains a custom polling implementation.
+ Handler PollingHandler[T]
+}
+
// NewPoller creates a Poller based on the provided initial response.
-// pollerID - a unique identifier for an LRO, it's usually the client.Method string.
-func NewPoller(pollerID string, resp *http.Response, pl pipeline.Pipeline) (*pollers.Poller, error) {
+func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) {
+ if options == nil {
+ options = &NewPollerOptions[T]{}
+ }
+ result := options.Response
+ if result == nil {
+ result = new(T)
+ }
+ if options.Handler != nil {
+ return &Poller[T]{
+ op: options.Handler,
+ resp: resp,
+ result: result,
+ }, nil
+ }
+
defer resp.Body.Close()
// this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success).
// ideally the codegen should return an error if the initial response failed and not even create a poller.
if !pollers.StatusCodeValid(resp) {
return nil, errors.New("the operation failed or was cancelled")
}
+
// determine the polling method
- var lro pollers.Operation
+ var opr PollingHandler[T]
var err error
- // op poller must be checked first as it can also have a location header
- if op.Applicable(resp) {
- lro, err = op.New(resp, pollerID)
+ if async.Applicable(resp) {
+ // async poller must be checked first as it can also have a location header
+ opr, err = async.New[T](pl, resp, options.FinalStateVia)
+ } else if op.Applicable(resp) {
+ // op poller must be checked before loc as it can also have a location header
+ opr, err = op.New[T](pl, resp, options.FinalStateVia)
} else if loc.Applicable(resp) {
- lro, err = loc.New(resp, pollerID)
+ opr, err = loc.New[T](pl, resp)
+ } else if body.Applicable(resp) {
+ // must test body poller last as it's a subset of the other pollers.
+ // TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion)
+ opr, err = body.New[T](pl, resp)
+ } else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) {
+ // if we get here it means we have a 202 with no polling headers.
+ // for DELETE and POST this is a hard error per ARM RPC spec.
+ return nil, errors.New("response is missing polling URL")
} else {
- lro = &pollers.NopPoller{}
+ opr, err = pollers.NewNopPoller[T](resp)
}
+
if err != nil {
return nil, err
}
- return pollers.NewPoller(lro, resp, pl), nil
+ return &Poller[T]{
+ op: opr,
+ resp: resp,
+ result: result,
+ }, nil
+}
+
+// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken.
+type NewPollerFromResumeTokenOptions[T any] struct {
+ // Response contains a preconstructed response type.
+ // The final payload will be unmarshaled into it and returned.
+ Response *T
+
+ // Handler[T] contains a custom polling implementation.
+ Handler PollingHandler[T]
}
// NewPollerFromResumeToken creates a Poller from a resume token string.
-// pollerID - a unique identifier for an LRO, it's usually the client.Method string.
-func NewPollerFromResumeToken(pollerID string, token string, pl pipeline.Pipeline) (*pollers.Poller, error) {
- kind, err := pollers.KindFromToken(pollerID, token)
+func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) {
+ if options == nil {
+ options = &NewPollerFromResumeTokenOptions[T]{}
+ }
+ result := options.Response
+ if result == nil {
+ result = new(T)
+ }
+
+ if err := pollers.IsTokenValid[T](token); err != nil {
+ return nil, err
+ }
+ raw, err := pollers.ExtractToken(token)
if err != nil {
return nil, err
}
+ var asJSON map[string]interface{}
+ if err := json.Unmarshal(raw, &asJSON); err != nil {
+ return nil, err
+ }
+
+ opr := options.Handler
// now rehydrate the poller based on the encoded poller type
- var lro pollers.Operation
- switch kind {
- case loc.Kind:
- log.Writef(log.EventLRO, "Resuming %s poller.", loc.Kind)
- lro = &loc.Poller{}
- case op.Kind:
- log.Writef(log.EventLRO, "Resuming %s poller.", op.Kind)
- lro = &op.Poller{}
- default:
- return nil, fmt.Errorf("unhandled poller type %s", kind)
- }
- if err = json.Unmarshal([]byte(token), lro); err != nil {
+ if async.CanResume(asJSON) {
+ opr, _ = async.New[T](pl, nil, "")
+ } else if body.CanResume(asJSON) {
+ opr, _ = body.New[T](pl, nil)
+ } else if loc.CanResume(asJSON) {
+ opr, _ = loc.New[T](pl, nil)
+ } else if op.CanResume(asJSON) {
+ opr, _ = op.New[T](pl, nil, "")
+ } else if opr != nil {
+ log.Writef(log.EventLRO, "Resuming custom poller %T.", opr)
+ } else {
+ return nil, fmt.Errorf("unhandled poller token %s", string(raw))
+ }
+ if err := json.Unmarshal(raw, &opr); err != nil {
+ return nil, err
+ }
+ return &Poller[T]{
+ op: opr,
+ result: result,
+ }, nil
+}
+
+// PollingHandler[T] abstracts the differences among poller implementations.
+type PollingHandler[T any] interface {
+ // Done returns true if the LRO has reached a terminal state.
+ Done() bool
+
+ // Poll fetches the latest state of the LRO.
+ Poll(context.Context) (*http.Response, error)
+
+ // Result is called once the LRO has reached a terminal state. It populates the out parameter
+ // with the result of the operation.
+ Result(ctx context.Context, out *T) error
+}
+
+// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state.
+type Poller[T any] struct {
+ op PollingHandler[T]
+ resp *http.Response
+ err error
+ result *T
+ done bool
+}
+
+// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method.
+type PollUntilDoneOptions struct {
+ // Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second.
+ // Pass zero to accept the default value (30s).
+ Frequency time.Duration
+}
+
+// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires.
+// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals.
+// options: pass nil to accept the default values.
+// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might
+// benefit from a shorter or longer duration.
+func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) {
+ if options == nil {
+ options = &PollUntilDoneOptions{}
+ }
+ cp := *options
+ if cp.Frequency == 0 {
+ cp.Frequency = 30 * time.Second
+ }
+
+ // skip the floor check when executing tests so they don't take so long
+ if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second {
+ return *new(T), errors.New("polling frequency minimum is one second")
+ }
+
+ start := time.Now()
+ logPollUntilDoneExit := func(v interface{}) {
+ log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start))
+ }
+ log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op)
+ if p.resp != nil {
+ // initial check for a retry-after header existing on the initial response
+ if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 {
+ log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String())
+ if err := shared.Delay(ctx, retryAfter); err != nil {
+ logPollUntilDoneExit(err)
+ return *new(T), err
+ }
+ }
+ }
+ // begin polling the endpoint until a terminal state is reached
+ for {
+ resp, err := p.Poll(ctx)
+ if err != nil {
+ logPollUntilDoneExit(err)
+ return *new(T), err
+ }
+ if p.Done() {
+ logPollUntilDoneExit("succeeded")
+ return p.Result(ctx)
+ }
+ d := cp.Frequency
+ if retryAfter := shared.RetryAfter(resp); retryAfter > 0 {
+ log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String())
+ d = retryAfter
+ } else {
+ log.Writef(log.EventLRO, "delay for %s", d.String())
+ }
+ if err = shared.Delay(ctx, d); err != nil {
+ logPollUntilDoneExit(err)
+ return *new(T), err
+ }
+ }
+}
+
+// Poll fetches the latest state of the LRO. It returns an HTTP response or error.
+// If Poll succeeds, the poller's state is updated and the HTTP response is returned.
+// If Poll fails, the poller's state is unmodified and the error is returned.
+// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response.
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
+ if p.Done() {
+ // the LRO has reached a terminal state, don't poll again
+ return p.resp, nil
+ }
+ resp, err := p.op.Poll(ctx)
+ if err != nil {
return nil, err
}
- return pollers.NewPoller(lro, nil, pl), nil
+ p.resp = resp
+ return p.resp, nil
+}
+
+// Done returns true if the LRO has reached a terminal state.
+// Once a terminal state is reached, call Result().
+func (p *Poller[T]) Done() bool {
+ return p.op.Done()
+}
+
+// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done.
+// If the LRO completed successfully, a populated instance of T is returned.
+// If the LRO failed or was canceled, an *azcore.ResponseError error is returned.
+// Calling this on an LRO in a non-terminal state will return an error.
+func (p *Poller[T]) Result(ctx context.Context) (T, error) {
+ if !p.Done() {
+ return *new(T), errors.New("poller is in a non-terminal state")
+ }
+ if p.done {
+ // the result has already been retrieved, return the cached value
+ if p.err != nil {
+ return *new(T), p.err
+ }
+ return *p.result, nil
+ }
+ err := p.op.Result(ctx, p.result)
+ var respErr *exported.ResponseError
+ if errors.As(err, &respErr) {
+ // the LRO failed. record the error
+ p.err = err
+ } else if err != nil {
+ // the call to Result failed, don't cache anything in this case
+ return *new(T), err
+ }
+ p.done = true
+ if p.err != nil {
+ return *new(T), p.err
+ }
+ return *p.result, nil
+}
+
+// ResumeToken returns a value representing the poller that can be used to resume
+// the LRO at a later time. ResumeTokens are unique per service operation.
+// The token's format should be considered opaque and is subject to change.
+// Calling this on an LRO in a terminal state will return an error.
+func (p *Poller[T]) ResumeToken() (string, error) {
+ if p.Done() {
+ return "", errors.New("poller is in a terminal state")
+ }
+ tk, err := pollers.NewResumeToken[T](p.op)
+ if err != nil {
+ return "", err
+ }
+ return tk, err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
index 785b08a24a4..118588d828d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -19,15 +19,11 @@ import (
"strings"
"time"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pipeline"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)
-// Pipeline represents a primitive for sending HTTP requests and receiving responses.
-// Its behavior can be extended by specifying policies during construction.
-type Pipeline = pipeline.Pipeline
-
// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when
// encoding/decoding a slice of bytes to/from a string.
type Base64Encoding int
@@ -41,8 +37,9 @@ const (
)
// NewRequest creates a new policy.Request with the specified input.
-func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*pipeline.Request, error) {
- return pipeline.NewRequest(ctx, httpMethod, endpoint)
+// The endpoint MUST be properly encoded before calling this function.
+func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) {
+ return exported.NewRequest(ctx, httpMethod, endpoint)
}
// JoinPaths concatenates multiple URL path segments into one path,
@@ -87,7 +84,7 @@ func EncodeByteArray(v []byte, format Base64Encoding) string {
func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error {
// send as a JSON string
encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format))
- return req.SetBody(shared.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON)
+ return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON)
}
// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody.
@@ -97,7 +94,7 @@ func MarshalAsJSON(req *policy.Request, v interface{}) error {
if err != nil {
return fmt.Errorf("error marshalling type %T: %s", v, err)
}
- return req.SetBody(shared.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON)
+ return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON)
}
// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody.
@@ -106,7 +103,9 @@ func MarshalAsXML(req *policy.Request, v interface{}) error {
if err != nil {
return fmt.Errorf("error marshalling type %T: %s", v, err)
}
- return req.SetBody(shared.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML)
+ // inclue the XML header as some services require it
+ b = []byte(xml.Header + string(b))
+ return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML)
}
// SetMultipartFormData writes the specified keys/values as multi-part form
@@ -140,7 +139,7 @@ func SetMultipartFormData(req *policy.Request, formData map[string]interface{})
if err := writer.Close(); err != nil {
return err
}
- return req.SetBody(shared.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType())
+ return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType())
}
// SkipBodyDownload will disable automatic downloading of the response body.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
index 8df3ca8131f..f86ec0b95ea 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -13,9 +13,9 @@ import (
"encoding/xml"
"fmt"
"io"
- "io/ioutil"
"net/http"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
)
@@ -23,12 +23,12 @@ import (
// On a successful read, the response body is cached.
// Subsequent reads will access the cached value.
func Payload(resp *http.Response) ([]byte, error) {
- return shared.Payload(resp)
+ return exported.Payload(resp)
}
// HasStatusCode returns true if the Response's status code is one of the specified values.
func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
- return shared.HasStatusCode(resp, statusCodes...)
+ return exported.HasStatusCode(resp, statusCodes...)
}
// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v.
@@ -85,7 +85,7 @@ func UnmarshalAsXML(resp *http.Response, v interface{}) error {
// Drain reads the response body to completion then closes it. The bytes read are discarded.
func Drain(resp *http.Response) {
if resp != nil && resp.Body != nil {
- _, _ = io.Copy(ioutil.Discard, resp.Body)
+ _, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close()
}
}
@@ -99,7 +99,7 @@ func removeBOM(resp *http.Response) error {
// UTF8
trimmed := bytes.TrimPrefix(payload, []byte("\xef\xbb\xbf"))
if len(trimmed) < len(payload) {
- resp.Body.(*shared.NopClosingBytesReader).Set(trimmed)
+ resp.Body.(shared.BytesSetter).Set(trimmed)
}
return nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
index f7f3ca9c14e..869bed51184 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go
index b613f085bb6..cadaef3d584 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
index ca0b05c8081..8563375af07 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -9,7 +9,7 @@ package streaming
import (
"io"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
)
type progress struct {
@@ -21,7 +21,7 @@ type progress struct {
// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
- return shared.NopCloser(rs)
+ return exported.NopCloser(rs)
}
// NewRequestProgress adds progress reporting to an HTTP request's body stream.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go
index 64733444fb5..faa98c9dc51 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go
index 01bb033ef03..e0e4817b90d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go
@@ -1,107 +1,21 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package to
-import "time"
-
-// BoolPtr returns a pointer to the provided bool.
-func BoolPtr(b bool) *bool {
- return &b
-}
-
-// Float32Ptr returns a pointer to the provided float32.
-func Float32Ptr(i float32) *float32 {
- return &i
-}
-
-// Float64Ptr returns a pointer to the provided float64.
-func Float64Ptr(i float64) *float64 {
- return &i
-}
-
-// Int32Ptr returns a pointer to the provided int32.
-func Int32Ptr(i int32) *int32 {
- return &i
-}
-
-// Int64Ptr returns a pointer to the provided int64.
-func Int64Ptr(i int64) *int64 {
- return &i
-}
-
-// StringPtr returns a pointer to the provided string.
-func StringPtr(s string) *string {
- return &s
-}
-
-// TimePtr returns a pointer to the provided time.Time.
-func TimePtr(t time.Time) *time.Time {
- return &t
-}
-
-// Int32PtrArray returns an array of *int32 from the specified values.
-func Int32PtrArray(vals ...int32) []*int32 {
- arr := make([]*int32, len(vals))
- for i := range vals {
- arr[i] = Int32Ptr(vals[i])
- }
- return arr
-}
-
-// Int64PtrArray returns an array of *int64 from the specified values.
-func Int64PtrArray(vals ...int64) []*int64 {
- arr := make([]*int64, len(vals))
- for i := range vals {
- arr[i] = Int64Ptr(vals[i])
- }
- return arr
-}
-
-// Float32PtrArray returns an array of *float32 from the specified values.
-func Float32PtrArray(vals ...float32) []*float32 {
- arr := make([]*float32, len(vals))
- for i := range vals {
- arr[i] = Float32Ptr(vals[i])
- }
- return arr
-}
-
-// Float64PtrArray returns an array of *float64 from the specified values.
-func Float64PtrArray(vals ...float64) []*float64 {
- arr := make([]*float64, len(vals))
- for i := range vals {
- arr[i] = Float64Ptr(vals[i])
- }
- return arr
-}
-
-// BoolPtrArray returns an array of *bool from the specified values.
-func BoolPtrArray(vals ...bool) []*bool {
- arr := make([]*bool, len(vals))
- for i := range vals {
- arr[i] = BoolPtr(vals[i])
- }
- return arr
-}
-
-// StringPtrArray returns an array of *string from the specified values.
-func StringPtrArray(vals ...string) []*string {
- arr := make([]*string, len(vals))
- for i := range vals {
- arr[i] = StringPtr(vals[i])
- }
- return arr
+// Ptr returns a pointer to the provided value.
+func Ptr[T any](v T) *T {
+ return &v
}
-// TimePtrArray returns an array of *time.Time from the specified values.
-func TimePtrArray(vals ...time.Time) []*time.Time {
- arr := make([]*time.Time, len(vals))
- for i := range vals {
- arr[i] = TimePtr(vals[i])
+// SliceOfPtrs returns a slice of *T from the specified values.
+func SliceOfPtrs[T any](vv ...T) []*T {
+ slc := make([]*T, len(vv))
+ for i := range vv {
+ slc[i] = Ptr(vv[i])
}
- return arr
+ return slc
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
new file mode 100644
index 00000000000..670839fd441
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
@@ -0,0 +1,295 @@
+# Release History
+
+## 1.1.0 (2022-06-07)
+
+### Features Added
+* `ClientCertificateCredential` and `ClientSecretCredential` support ESTS-R. First-party
+ applications can set environment variable `AZURE_REGIONAL_AUTHORITY_NAME` with a
+ region name.
+
+## 1.0.1 (2022-06-07)
+
+### Other Changes
+* Upgrade `microsoft-authentication-library-for-go` requirement to v0.5.1
+ ([#18176](https://github.com/Azure/azure-sdk-for-go/issues/18176))
+
+## 1.0.0 (2022-05-12)
+
+### Features Added
+* `DefaultAzureCredential` reads environment variable `AZURE_CLIENT_ID` for the
+ client ID of a user-assigned managed identity
+ ([#17293](https://github.com/Azure/azure-sdk-for-go/pull/17293))
+
+### Breaking Changes
+* Removed `AuthorizationCodeCredential`. Use `InteractiveBrowserCredential` instead
+ to authenticate a user with the authorization code flow.
+* Instances of `AuthenticationFailedError` are now returned by pointer.
+* `GetToken()` returns `azcore.AccessToken` by value
+
+### Bugs Fixed
+* `AzureCLICredential` panics after receiving an unexpected error type
+ ([#17490](https://github.com/Azure/azure-sdk-for-go/issues/17490))
+
+### Other Changes
+* `GetToken()` returns an error when the caller specifies no scope
+* Updated to the latest versions of `golang.org/x/crypto`, `azcore` and `internal`
+
+## 0.14.0 (2022-04-05)
+
+### Breaking Changes
+* This module now requires Go 1.18
+* Removed `AuthorityHost`. Credentials are now configured for sovereign or private
+ clouds with the API in `azcore/cloud`, for example:
+ ```go
+ // before
+ opts := azidentity.ClientSecretCredentialOptions{AuthorityHost: azidentity.AzureGovernment}
+ cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts)
+
+ // after
+ import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+
+ opts := azidentity.ClientSecretCredentialOptions{}
+ opts.Cloud = cloud.AzureGovernment
+ cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts)
+ ```
+
+## 0.13.2 (2022-03-08)
+
+### Bugs Fixed
+* Prevented a data race in `DefaultAzureCredential` and `ChainedTokenCredential`
+ ([#17144](https://github.com/Azure/azure-sdk-for-go/issues/17144))
+
+### Other Changes
+* Upgraded App Service managed identity version from 2017-09-01 to 2019-08-01
+ ([#17086](https://github.com/Azure/azure-sdk-for-go/pull/17086))
+
+## 0.13.1 (2022-02-08)
+
+### Features Added
+* `EnvironmentCredential` supports certificate SNI authentication when
+ `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN` is "true".
+ ([#16851](https://github.com/Azure/azure-sdk-for-go/pull/16851))
+
+### Bugs Fixed
+* `ManagedIdentityCredential.GetToken()` now returns an error when configured for
+ a user assigned identity in Azure Cloud Shell (which doesn't support such identities)
+ ([#16946](https://github.com/Azure/azure-sdk-for-go/pull/16946))
+
+### Other Changes
+* `NewDefaultAzureCredential()` logs non-fatal errors. These errors are also included in the
+ error returned by `DefaultAzureCredential.GetToken()` when it's unable to acquire a token
+ from any source. ([#15923](https://github.com/Azure/azure-sdk-for-go/issues/15923))
+
+## 0.13.0 (2022-01-11)
+
+### Breaking Changes
+* Replaced `AuthenticationFailedError.RawResponse()` with a field having the same name
+* Unexported `CredentialUnavailableError`
+* Instances of `ChainedTokenCredential` will now skip looping through the list of source credentials and re-use the first successful credential on subsequent calls to `GetToken`.
+ * If `ChainedTokenCredentialOptions.RetrySources` is true, `ChainedTokenCredential` will continue to try all of the originally provided credentials each time the `GetToken` method is called.
+ * `ChainedTokenCredential.successfulCredential` will contain a reference to the last successful credential.
+  * `DefaultAzureCredential` will also re-use the first successful credential on subsequent calls to `GetToken`.
+ * `DefaultAzureCredential.chain.successfulCredential` will also contain a reference to the last successful credential.
+
+### Other Changes
+* `ManagedIdentityCredential` no longer probes IMDS before requesting a token
+ from it. Also, an error response from IMDS no longer disables a credential
+ instance. Following an error, a credential instance will continue to send
+ requests to IMDS as necessary.
+* Adopted MSAL for user and service principal authentication
+* Updated `azcore` requirement to 0.21.0
+
+## 0.12.0 (2021-11-02)
+### Breaking Changes
+* Raised minimum go version to 1.16
+* Removed `NewAuthenticationPolicy()` from credentials. Clients should instead use azcore's
+ `runtime.NewBearerTokenPolicy()` to construct a bearer token authorization policy.
+* The `AuthorityHost` field in credential options structs is now a custom type,
+ `AuthorityHost`, with underlying type `string`
+* `NewChainedTokenCredential` has a new signature to accommodate a placeholder
+ options struct:
+ ```go
+ // before
+ cred, err := NewChainedTokenCredential(credA, credB)
+
+ // after
+ cred, err := NewChainedTokenCredential([]azcore.TokenCredential{credA, credB}, nil)
+ ```
+* Removed `ExcludeAzureCLICredential`, `ExcludeEnvironmentCredential`, and `ExcludeMSICredential`
+ from `DefaultAzureCredentialOptions`
+* `NewClientCertificateCredential` requires a `[]*x509.Certificate` and `crypto.PrivateKey` instead of
+ a path to a certificate file. Added `ParseCertificates` to simplify getting these in common cases:
+ ```go
+ // before
+ cred, err := NewClientCertificateCredential("tenant", "client-id", "/cert.pem", nil)
+
+ // after
+ certData, err := os.ReadFile("/cert.pem")
+ certs, key, err := ParseCertificates(certData, password)
+ cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, nil)
+ ```
+* Removed `InteractiveBrowserCredentialOptions.ClientSecret` and `.Port`
+* Removed `AADAuthenticationFailedError`
+* Removed `id` parameter of `NewManagedIdentityCredential()`. User assigned identities are now
+ specified by `ManagedIdentityCredentialOptions.ID`:
+ ```go
+ // before
+ cred, err := NewManagedIdentityCredential("client-id", nil)
+ // or, for a resource ID
+ opts := &ManagedIdentityCredentialOptions{ID: ResourceID}
+ cred, err := NewManagedIdentityCredential("/subscriptions/...", opts)
+
+ // after
+ clientID := ClientID("7cf7db0d-...")
+ opts := &ManagedIdentityCredentialOptions{ID: clientID}
+ // or, for a resource ID
+  resID := ResourceID("/subscriptions/...")
+ opts := &ManagedIdentityCredentialOptions{ID: resID}
+ cred, err := NewManagedIdentityCredential(opts)
+ ```
+* `DeviceCodeCredentialOptions.UserPrompt` has a new type: `func(context.Context, DeviceCodeMessage) error`
+* Credential options structs now embed `azcore.ClientOptions`. In addition to changing literal initialization
+ syntax, this change renames `HTTPClient` fields to `Transport`.
+* Renamed `LogCredential` to `EventCredential`
+* `AzureCLICredential` no longer reads the environment variable `AZURE_CLI_PATH`
+* `NewManagedIdentityCredential` no longer reads environment variables `AZURE_CLIENT_ID` and
+ `AZURE_RESOURCE_ID`. Use `ManagedIdentityCredentialOptions.ID` instead.
+* Unexported `AuthenticationFailedError` and `CredentialUnavailableError` structs. In their place are two
+ interfaces having the same names.
+
+### Bugs Fixed
+* `AzureCLICredential.GetToken` no longer mutates its `opts.Scopes`
+
+### Features Added
+* Added connection configuration options to `DefaultAzureCredentialOptions`
+* `AuthenticationFailedError.RawResponse()` returns the HTTP response motivating the error,
+ if available
+
+### Other Changes
+* `NewDefaultAzureCredential()` returns `*DefaultAzureCredential` instead of `*ChainedTokenCredential`
+* Added `TenantID` field to `DefaultAzureCredentialOptions` and `AzureCLICredentialOptions`
+
+## 0.11.0 (2021-09-08)
+### Breaking Changes
+* Unexported `AzureCLICredentialOptions.TokenProvider` and its type,
+ `AzureCLITokenProvider`
+
+### Bug Fixes
+* `ManagedIdentityCredential.GetToken` returns `CredentialUnavailableError`
+ when IMDS has no assigned identity, signaling `DefaultAzureCredential` to
+ try other credentials
+
+
+## 0.10.0 (2021-08-30)
+### Breaking Changes
+* Update based on `azcore` refactor [#15383](https://github.com/Azure/azure-sdk-for-go/pull/15383)
+
+## 0.9.3 (2021-08-20)
+
+### Bugs Fixed
+* `ManagedIdentityCredential.GetToken` no longer mutates its `opts.Scopes`
+
+### Other Changes
+* Bumps version of `azcore` to `v0.18.1`
+
+
+## 0.9.2 (2021-07-23)
+### Features Added
+* Adding support for Service Fabric environment in `ManagedIdentityCredential`
+* Adding an option for using a resource ID instead of client ID in `ManagedIdentityCredential`
+
+
+## 0.9.1 (2021-05-24)
+### Features Added
+* Add LICENSE.txt and bump version information
+
+
+## 0.9.0 (2021-05-21)
+### Features Added
+* Add support for authenticating in Azure Stack environments
+* Enable user assigned identities for the IMDS scenario in `ManagedIdentityCredential`
+* Add scope to resource conversion in `GetToken()` on `ManagedIdentityCredential`
+
+
+## 0.8.0 (2021-01-20)
+### Features Added
+* Updating documentation
+
+
+## 0.7.1 (2021-01-04)
+### Features Added
+* Adding port option to `InteractiveBrowserCredential`
+
+
+## 0.7.0 (2020-12-11)
+### Features Added
+* Add `redirectURI` parameter back to authentication code flow
+
+
+## 0.6.1 (2020-12-09)
+### Features Added
+* Updating query parameter in `ManagedIdentityCredential` and updating datetime string for parsing managed identity access tokens.
+
+
+## 0.6.0 (2020-11-16)
+### Features Added
+* Remove `RedirectURL` parameter from auth code flow to align with the MSAL implementation which relies on the native client redirect URL.
+
+
+## 0.5.0 (2020-10-30)
+### Features Added
+* Flattening credential options
+
+
+## 0.4.3 (2020-10-21)
+### Features Added
+* Adding Azure Arc support in `ManagedIdentityCredential`
+
+
+## 0.4.2 (2020-10-16)
+### Features Added
+* Typo fixes
+
+
+## 0.4.1 (2020-10-16)
+### Features Added
+* Ensure authority hosts are only HTTPs
+
+
+## 0.4.0 (2020-10-16)
+### Features Added
+* Adding options structs for credentials
+
+
+## 0.3.0 (2020-10-09)
+### Features Added
+* Update `DeviceCodeCredential` callback
+
+
+## 0.2.2 (2020-10-09)
+### Features Added
+* Add `AuthorizationCodeCredential`
+
+
+## 0.2.1 (2020-10-06)
+### Features Added
+* Add `InteractiveBrowserCredential`
+
+
+## 0.2.0 (2020-09-11)
+### Features Added
+* Refactor `azidentity` on top of `azcore` refactor
+* Updated policies to conform to `policy.Policy` interface changes.
+* Updated non-retriable errors to conform to `azcore.NonRetriableError`.
+* Fixed calls to `Request.SetBody()` to include content type.
+* Switched endpoints to string types and removed extra parsing code.
+
+
+## 0.1.1 (2020-09-02)
+### Features Added
+* Add `AzureCLICredential` to `DefaultAzureCredential` chain
+
+
+## 0.1.0 (2020-07-23)
+### Features Added
+* Initial Release. Azure Identity library that provides Azure Active Directory token authentication support for the SDK.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt
new file mode 100644
index 00000000000..48ea6616b5b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) Microsoft Corporation.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
new file mode 100644
index 00000000000..4ac53eb7b27
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
@@ -0,0 +1,307 @@
+# Migrating from autorest/adal to azidentity
+
+`azidentity` provides Azure Active Directory (Azure AD) authentication for the newest Azure SDK modules (`github.com/Azure/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/Azure/azure-sdk-for-go/services/...`) use types from `github.com/Azure/go-autorest/autorest/adal` instead.
+
+This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`.
+
+## Table of contents
+
+- [Acquire a token](#acquire-a-token)
+- [Client certificate authentication](#client-certificate-authentication)
+- [Client secret authentication](#client-secret-authentication)
+- [Configuration](#configuration)
+- [Device code authentication](#device-code-authentication)
+- [Managed identity](#managed-identity)
+- [Use azidentity credentials with older packages](#use-azidentity-credentials-with-older-packages)
+
+## Configuration
+
+### `autorest/adal`
+
+Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires an Azure AD endpoint and tenant:
+
+```go
+import "github.com/Azure/go-autorest/autorest/adal"
+
+oauthCfg, err := adal.NewOAuthConfig("https://login.chinacloudapi.cn", tenantID)
+handle(err)
+
+spt, err := adal.NewServicePrincipalTokenWithSecret(
+ *oauthCfg, clientID, "https://management.chinacloudapi.cn/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret},
+)
+```
+
+### `azidentity`
+
+A credential instance can acquire tokens for any audience. The audience for each token is determined by the client requesting it. Credentials require endpoint configuration only for sovereign or private clouds. The `azcore/cloud` package has predefined configuration for sovereign clouds such as Azure China:
+
+```go
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+)
+
+clientOpts := azcore.ClientOptions{Cloud: cloud.AzureChina}
+
+cred, err := azidentity.NewClientSecretCredential(
+ tenantID, clientID, secret, &azidentity.ClientSecretCredentialOptions{ClientOptions: clientOpts},
+)
+handle(err)
+```
+
+## Client secret authentication
+
+### `autorest/adal`
+
+```go
+import (
+ "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/adal"
+)
+
+oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
+handle(err)
+spt, err := adal.NewServicePrincipalTokenWithSecret(
+ *oauthCfg, clientID, "https://management.azure.com/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret},
+)
+handle(err)
+
+client := subscriptions.NewClient()
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```
+
+### `azidentity`
+
+```go
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+ "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
+)
+
+cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
+handle(err)
+
+client, err := armsubscriptions.NewClient(cred, nil)
+handle(err)
+```
+
+## Client certificate authentication
+
+### `autorest/adal`
+
+```go
+import (
+ "os"
+
+ "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/adal"
+)
+certData, err := os.ReadFile("./example.pfx")
+handle(err)
+
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+handle(err)
+
+oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
+handle(err)
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+	*oauthCfg, clientID, certificate, rsaPrivateKey, "https://management.azure.com/",
+)
+
+client := subscriptions.NewClient()
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```
+
+### `azidentity`
+
+```go
+import (
+ "os"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+ "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
+)
+
+certData, err := os.ReadFile("./example.pfx")
+handle(err)
+
+certs, key, err := azidentity.ParseCertificates(certData, nil)
+handle(err)
+
+cred, err := azidentity.NewClientCertificateCredential(tenantID, clientID, certs, key, nil)
+handle(err)
+
+client, err := armsubscriptions.NewClient(cred, nil)
+handle(err)
+```
+
+## Managed identity
+
+### `autorest/adal`
+
+```go
+import (
+ "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/adal"
+)
+
+spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", nil)
+handle(err)
+
+client := subscriptions.NewClient()
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```
+
+### `azidentity`
+
+```go
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+ "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
+)
+
+cred, err := azidentity.NewManagedIdentityCredential(nil)
+handle(err)
+
+client, err := armsubscriptions.NewClient(cred, nil)
+handle(err)
+```
+
+### User-assigned identities
+
+`autorest/adal`:
+
+```go
+import "github.com/Azure/go-autorest/autorest/adal"
+
+opts := &adal.ManagedIdentityOptions{ClientID: "..."}
+spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", opts)
+handle(err)
+```
+
+`azidentity`:
+
+```go
+import "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+
+opts := azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID("...")}
+cred, err := azidentity.NewManagedIdentityCredential(&opts)
+handle(err)
+```
+
+## Device code authentication
+
+### `autorest/adal`
+
+```go
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/adal"
+)
+
+oauthClient := &http.Client{}
+oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
+handle(err)
+resource := "https://management.azure.com/"
+deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthCfg, clientID, resource)
+handle(err)
+
+// display instructions, wait for the user to authenticate
+fmt.Println(*deviceCode.Message)
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+handle(err)
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(*oauthCfg, clientID, resource, *token)
+handle(err)
+
+client := subscriptions.NewClient()
+client.Authorizer = autorest.NewBearerAuthorizer(spt)
+```
+
+### `azidentity`
+
+```go
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+ "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
+)
+
+cred, err := azidentity.NewDeviceCodeCredential(nil)
+handle(err)
+
+client, err := armsubscriptions.NewClient(cred, nil)
+handle(err)
+```
+
+`azidentity.DeviceCodeCredential` will guide a user through authentication, printing instructions to the console by default. The user prompt is customizable. For more information, see the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential).
+
+## Acquire a token
+
+### `autorest/adal`
+
+```go
+import "github.com/Azure/go-autorest/autorest/adal"
+
+oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
+handle(err)
+
+spt, err := adal.NewServicePrincipalTokenWithSecret(
+ *oauthCfg, clientID, "https://vault.azure.net", &adal.ServicePrincipalTokenSecret{ClientSecret: secret},
+)
+
+err = spt.Refresh()
+if err == nil {
+ token := spt.Token
+}
+```
+
+### `azidentity`
+
+In ordinary usage, application code doesn't need to request tokens from credentials directly. Azure SDK clients handle token acquisition and refreshing internally. However, applications may call `GetToken()` to do so. All credential types have this method.
+
+```go
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+)
+
+cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
+handle(err)
+
+tk, err := cred.GetToken(
+ context.TODO(), policy.TokenRequestOptions{Scopes: []string{"https://vault.azure.net/.default"}},
+)
+if err == nil {
+ token := tk.Token
+}
+```
+
+Note that `azidentity` credentials use the Azure AD v2.0 endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent).
+
+## Use azidentity credentials with older packages
+
+The [azidext module](https://pkg.go.dev/github.com/jongio/azidext/go/azidext) provides an adapter for `azidentity` credential types. The adapter enables using the credential types with older Azure SDK clients. For example:
+
+```go
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+ "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
+ "github.com/jongio/azidext/go/azidext"
+)
+
+cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
+handle(err)
+
+client := subscriptions.NewClient()
+client.Authorizer = azidext.NewTokenCredentialAdapter(cred, []string{"https://management.azure.com//.default"})
+```
+
+
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
new file mode 100644
index 00000000000..68b35a545c3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
@@ -0,0 +1,239 @@
+# Azure Identity Client Module for Go
+
+The Azure Identity module provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication.
+
+[](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity)
+| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/)
+| [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity)
+
+# Getting started
+
+## Install the module
+
+This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management.
+
+Install the Azure Identity module:
+
+```sh
+go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity
+```
+
+## Prerequisites
+
+- an [Azure subscription](https://azure.microsoft.com/free/)
+- Go 1.18
+
+### Authenticating during local development
+
+When debugging and executing code locally, developers typically use their own accounts to authenticate calls to Azure services. The `azidentity` module supports authenticating through developer tools to simplify local development.
+
+#### Authenticating via the Azure CLI
+
+`DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user
+signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user.
+
+When no default browser is available, `az login` will use the device code
+authentication flow. This can also be selected manually by running `az login --use-device-code`.
+
+## Key concepts
+
+### Credentials
+
+A credential is a type which contains or can obtain the data needed for a
+service client to authenticate requests. Service clients across the Azure SDK
+accept a credential instance when they are constructed, and use that credential
+to authenticate requests.
+
+The `azidentity` module focuses on OAuth authentication with Azure Active
+Directory (AAD). It offers a variety of credential types capable of acquiring
+an Azure AD access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types.
+
+### DefaultAzureCredential
+
+`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds:
+
+
+
+1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate.
+2. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it.
+3. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity.
+
+> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types.
+
+## Managed Identity
+
+`DefaultAzureCredential` and `ManagedIdentityCredential` support
+[managed identity authentication](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview)
+in any hosting environment which supports managed identities, such as (this list is not exhaustive):
+* [Azure App Service](https://docs.microsoft.com/azure/app-service/overview-managed-identity)
+* [Azure Arc](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)
+* [Azure Cloud Shell](https://docs.microsoft.com/azure/cloud-shell/msi-authorization)
+* [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/use-managed-identity)
+* [Azure Service Fabric](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)
+* [Azure Virtual Machines](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token)
+
+## Examples
+
+- [Authenticate with DefaultAzureCredential](#authenticate-with-defaultazurecredential "Authenticate with DefaultAzureCredential")
+- [Define a custom authentication flow with ChainedTokenCredential](#define-a-custom-authentication-flow-with-chainedtokencredential "Define a custom authentication flow with ChainedTokenCredential")
+- [Specify a user-assigned managed identity for DefaultAzureCredential](#specify-a-user-assigned-managed-identity-for-defaultazurecredential)
+
+### Authenticate with DefaultAzureCredential
+
+This example demonstrates authenticating a client from the `armresources` module with `DefaultAzureCredential`.
+
+```go
+cred, err := azidentity.NewDefaultAzureCredential(nil)
+if err != nil {
+ // handle error
+}
+
+client := armresources.NewResourceGroupsClient("subscription ID", cred, nil)
+```
+
+### Specify a user-assigned managed identity for DefaultAzureCredential
+
+To configure `DefaultAzureCredential` to authenticate a user-assigned managed identity, set the environment variable `AZURE_CLIENT_ID` to the identity's client ID.
+
+### Define a custom authentication flow with `ChainedTokenCredential`
+
+`DefaultAzureCredential` is generally the quickest way to get started developing apps for Azure. For more advanced scenarios, `ChainedTokenCredential` links multiple credential instances to be tried sequentially when authenticating. It will try each chained credential in turn until one provides a token or fails to authenticate due to an error.
+
+The following example demonstrates creating a credential, which will attempt to authenticate using managed identity. It will fall back to authenticating via the Azure CLI when a managed identity is unavailable.
+
+```go
+managed, err := azidentity.NewManagedIdentityCredential(nil)
+if err != nil {
+ // handle error
+}
+azCLI, err := azidentity.NewAzureCLICredential(nil)
+if err != nil {
+ // handle error
+}
+chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{managed, azCLI}, nil)
+if err != nil {
+ // handle error
+}
+
+client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
+```
+
+## Credential Types
+
+### Authenticating Azure Hosted Applications
+
+|Credential|Usage
+|-|-
+|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps
+|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials
+|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables
+|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource
+
+### Authenticating Service Principals
+
+|Credential|Usage
+|-|-
+|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret
+|[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate
+
+### Authenticating Users
+
+|Credential|Usage
+|-|-
+|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser
+|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI
+|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password
+
+### Authenticating via Development Tools
+
+|Credential|Usage
+|-|-
+|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI
+
+## Environment Variables
+
+`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. Each type of authentication requires values for specific variables:
+
+#### Service principal with secret
+
+|variable name|value
+|-|-
+|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application
+|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant
+|`AZURE_CLIENT_SECRET`|one of the application's client secrets
+
+#### Service principal with certificate
+
+|variable name|value
+|-|-
+|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application
+|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant
+|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key (without password protection)
+
+#### Username and password
+
+|variable name|value
+|-|-
+|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application
+|`AZURE_USERNAME`|a username (usually an email address)
+|`AZURE_PASSWORD`|that user's password
+
+Configuration is attempted in the above order. For example, if values for a
+client secret and certificate are both present, the client secret will be used.
+
+## Troubleshooting
+
+### Error Handling
+
+Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot).
+
+For more details on handling specific Azure Active Directory errors please refer to the
+Azure Active Directory
+[error code documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes).
+
+### Logging
+
+This module uses the classification-based logging implementation in `azcore`. To enable console logging for all SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. Use the `azcore/log` package to control log event output or to enable logs for `azidentity` only. For example:
+```go
+import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
+
+// print log output to stdout
+azlog.SetListener(func(event azlog.Event, s string) {
+ fmt.Println(s)
+})
+
+// include only azidentity credential logs
+azlog.SetEvents(azidentity.EventAuthentication)
+```
+
+Credentials log basic information only, such as `GetToken` success or failure and errors. These log entries don't contain authentication secrets but may contain sensitive information.
+
+## Next steps
+
+Client and management modules listed on the [Azure SDK releases page](https://azure.github.io/azure-sdk/releases/latest/go.html) support authenticating with `azidentity` credential types. You can learn more about using these libraries in their documentation, which is linked from the release page.
+
+## Provide Feedback
+
+If you encounter bugs or have suggestions, please
+[open an issue](https://github.com/Azure/azure-sdk-for-go/issues).
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require
+you to agree to a Contributor License Agreement (CLA) declaring that you have
+the right to, and actually do, grant us the rights to use your contribution.
+For details, visit [https://cla.microsoft.com](https://cla.microsoft.com).
+
+When you submit a pull request, a CLA-bot will automatically determine whether
+you need to provide a CLA and decorate the PR appropriately (e.g., label,
+comment). Simply follow the instructions provided by the bot. You will only
+need to do this once across all repos using our CLA.
+
+This project has adopted the
+[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information, see the
+[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
+or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any
+additional questions or comments.
+
+
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
new file mode 100644
index 00000000000..1e28d181fef
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
@@ -0,0 +1,192 @@
+# Troubleshoot Azure Identity authentication issues
+
+This troubleshooting guide covers failure investigation techniques, common errors for the credential types in the `azidentity` module, and mitigation steps to resolve these errors.
+
+## Table of contents
+
+- [Handle azidentity errors](#handle-azidentity-errors)
+ - [Permission issues](#permission-issues)
+- [Find relevant information in errors](#find-relevant-information-in-errors)
+- [Enable and configure logging](#enable-and-configure-logging)
+- [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues)
+- [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues)
+- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues)
+- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues)
+- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues)
+- [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues)
+ - [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity)
+ - [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity)
+ - [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity)
+- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues)
+- [Get additional help](#get-additional-help)
+
+## Handle azidentity errors
+
+Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Azure Active Directory (Azure AD). Depending on the application, these errors may or may not be recoverable.
+
+### Permission issues
+
+Service client errors with a status code of 401 or 403 often indicate that authentication succeeded but the caller doesn't have permission to access the specified API. Check the service documentation to determine which RBAC roles are needed for the request, and ensure the authenticated user or service principal has the appropriate role assignments.
+
+## Find relevant information in errors
+
+Authentication errors can include responses from Azure AD and often contain information helpful in diagnosis. Consider the following error message:
+
+```
+ClientSecretCredential authentication failed
+POST https://login.microsoftonline.com/3c631bb7-a9f7-4343-a5ba-a615913/oauth2/v2.0/token
+--------------------------------------------------------------------------------
+RESPONSE 401 Unauthorized
+--------------------------------------------------------------------------------
+{
+ "error": "invalid_client",
+ "error_description": "AADSTS7000215: Invalid client secret provided. Ensure the secret being sent in the request is the client secret value, not the client secret ID, for a secret added to app '86be4c01-505b-45e9-bfc0-9b825fd84'.\r\nTrace ID: 03da4b8e-5ffe-48ca-9754-aff4276f0100\r\nCorrelation ID: 7b12f9bb-2eef-42e3-ad75-eee69ec9088d\r\nTimestamp: 2022-03-02 18:25:26Z",
+ "error_codes": [
+ 7000215
+ ],
+ "timestamp": "2022-03-02 18:25:26Z",
+ "trace_id": "03da4b8e-5ffe-48ca-9754-aff4276f0100",
+ "correlation_id": "7b12f9bb-2eef-42e3-ad75-eee69ec9088d",
+ "error_uri": "https://login.microsoftonline.com/error?code=7000215"
+}
+--------------------------------------------------------------------------------
+```
+
+This error contains several pieces of information:
+
+- __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`.
+
+- __Azure AD Error Code and Message__: The error code and message returned by Azure AD. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes#aadsts-error-codes) has more information on AADSTS error codes.
+
+- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Azure AD failures.
+
+### Enable and configure logging
+
+`azidentity` provides the same logging capabilities as the rest of the Azure SDK. The simplest way to see the logs to help debug authentication issues is to print credential logs to the console.
+```go
+import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
+
+// print log output to stdout
+azlog.SetListener(func(event azlog.Event, s string) {
+ fmt.Println(s)
+})
+
+// include only azidentity credential logs
+azlog.SetEvents(azidentity.EventAuthentication)
+```
+
+## Troubleshoot DefaultAzureCredential authentication issues
+
+| Error |Description| Mitigation |
+|---|---|---|
+|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
- [Enable logging](#enable-and-configure-logging) to get further diagnostic information.
- Consult the troubleshooting guide for underlying credential types for more information.
- [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)
- [ManagedIdentityCredential](#troubleshoot-visualstudiocredential-authentication-issues)
- [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)
|
+|Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized), or 403 (Forbidden) status code|<ul><li>[Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token.</li><li>If an unexpected credential is returning a token, check application configuration such as environment variables.</li><li>Ensure the correct role is assigned to the authenticated identity. For example, a service specific role rather than the subscription Owner role.</li></ul>|
+
+## Troubleshoot EnvironmentCredential authentication issues
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+|Missing or incomplete environment variable configuration|A valid combination of environment variables wasn't set|Ensure the appropriate environment variables are set for the intended authentication method as described in the [module documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|
+
+## Troubleshoot ClientSecretCredential authentication issues
+
+| Error Code | Issue | Mitigation |
+|---|---|---|
+|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
+
+## Troubleshoot ClientCertificateCredential authentication issues
+
+| Error Code | Description | Mitigation |
+|---|---|---|
+|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).|
+|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).|
+
+## Troubleshoot UsernamePasswordCredential authentication issues
+
+| Error Code | Issue | Mitigation |
+|---|---|---|
+|AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.|
+
+## Troubleshoot ManagedIdentityCredential authentication issues
+
+`ManagedIdentityCredential` is designed to work on a variety of Azure hosts that support managed identity. Configuration and troubleshooting vary from host to host. The below table lists the Azure hosts that can be assigned a managed identity and are supported by `ManagedIdentityCredential`.
+
+|Host Environment| | |
+|---|---|---|
+|Azure Virtual Machines and Scale Sets|[Configuration](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)|
+|Azure App Service and Azure Functions|[Configuration](https://docs.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)|
+|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)|
+|Azure Arc|[Configuration](https://docs.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)||
+|Azure Service Fabric|[Configuration](https://docs.microsoft.com/azure/service-fabric/concepts-managed-identity)||
+
+### Azure Virtual Machine managed identity
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct. If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).|
+|The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error, 502 or 504 status code.|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`|
+|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|<ul><li>Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).</li><li>Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.</li></ul>|
+|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|<ul><li>Refer to the error message for more details on specific failures.</li><li>Ensure the VM is configured for managed identity as described in [managed identity documentation](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm).</li><li>Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.</li></ul>|
+
+#### Verify IMDS is available on the VM
+
+If you have access to the VM, you can use `curl` to verify the managed identity endpoint is available.
+
+```sh
+curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://management.core.windows.net&api-version=2018-02-01' -H "Metadata: true"
+```
+
+> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security.
+
+### Azure App Service and Azure Functions managed identity
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|- Ensure the App Service is configured for managed identity as described in [App Service documentation](https://docs.microsoft.com/azure/app-service/overview-managed-identity).
- Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.
|
+
+#### Verify the App Service managed identity endpoint is available
+
+If you can SSH into the App Service, you can verify managed identity is available in the environment. First ensure the environment variables `IDENTITY_ENDPOINT` and `IDENTITY_SECRET` are set. Then you can verify the managed identity endpoint is available using `curl`.
+
+```sh
+curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-version=2019-08-01" -H "X-IDENTITY-HEADER: $IDENTITY_HEADER"
+```
+
+> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security.
+
+### Azure Kubernetes Service managed identity
+
+#### Pod Identity
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response).
+
+## Troubleshoot AzureCliCredential authentication issues
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|<ul><li>Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://docs.microsoft.com/cli/azure/install-azure-cli).</li><li>Validate the installation location is in the application's `PATH` environment variable.</li></ul>|
+|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|<ul><li>Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://docs.microsoft.com/cli/azure/authenticate-azure-cli).</li><li>Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.</li></ul>|
+
+#### Verify the Azure CLI can obtain tokens
+
+You can manually verify that the Azure CLI can authenticate and obtain tokens. First, use the `account` command to verify the logged in account.
+
+```azurecli
+az account show
+```
+
+Once you've verified the Azure CLI is using the correct account, you can validate that it's able to obtain tokens for that account.
+
+```azurecli
+az account get-access-token --output json --resource https://management.core.windows.net
+```
+
+> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security.
+
+## Get additional help
+
+Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md).
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
new file mode 100644
index 00000000000..0faee55ef04
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
@@ -0,0 +1,129 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "regexp"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
+)
+
+const (
+ azureAuthorityHost = "AZURE_AUTHORITY_HOST"
+ azureClientID = "AZURE_CLIENT_ID"
+ azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME"
+
+ organizationsTenantID = "organizations"
+ developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46"
+ defaultSuffix = "/.default"
+ tenantIDValidationErr = "invalid tenantID. You can locate your tenantID by following the instructions listed here: https://docs.microsoft.com/partner-center/find-ids-and-domain-names"
+)
+
+// setAuthorityHost initializes the authority host for credentials. Precedence is:
+// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user
+// 2. value of AZURE_AUTHORITY_HOST
+// 3. default: Azure Public Cloud
+func setAuthorityHost(cc cloud.Configuration) (string, error) {
+ host := cc.ActiveDirectoryAuthorityHost
+ if host == "" {
+ if len(cc.Services) > 0 {
+ return "", errors.New("missing ActiveDirectoryAuthorityHost for specified cloud")
+ }
+ host = cloud.AzurePublic.ActiveDirectoryAuthorityHost
+ if envAuthorityHost := os.Getenv(azureAuthorityHost); envAuthorityHost != "" {
+ host = envAuthorityHost
+ }
+ }
+ u, err := url.Parse(host)
+ if err != nil {
+ return "", err
+ }
+ if u.Scheme != "https" {
+ return "", errors.New("cannot use an authority host without https")
+ }
+ return host, nil
+}
+
+// validTenantID returns true if it receives a valid tenantID, false otherwise.
+// A valid tenant ID contains only alphanumerics, '-', and '.' (e.g. a GUID or
+// a domain name such as "contoso.onmicrosoft.com").
+func validTenantID(tenantID string) bool {
+	match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID)
+	if err != nil {
+		return false
+	}
+	return match
+}
+
+func newPipelineAdapter(opts *azcore.ClientOptions) pipelineAdapter {
+ pl := runtime.NewPipeline(component, version, runtime.PipelineOptions{}, opts)
+ return pipelineAdapter{pl: pl}
+}
+
+type pipelineAdapter struct {
+ pl runtime.Pipeline
+}
+
+func (p pipelineAdapter) CloseIdleConnections() {
+ // do nothing
+}
+
+func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) {
+ req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String())
+ if err != nil {
+ return nil, err
+ }
+ if r.Body != nil && r.Body != http.NoBody {
+ // create a rewindable body from the existing body as required
+ var body io.ReadSeekCloser
+ if rsc, ok := r.Body.(io.ReadSeekCloser); ok {
+ body = rsc
+ } else {
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ return nil, err
+ }
+ body = streaming.NopCloser(bytes.NewReader(b))
+ }
+ err = req.SetBody(body, r.Header.Get("Content-Type"))
+ if err != nil {
+ return nil, err
+ }
+ }
+ resp, err := p.pl.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ return resp, err
+}
+
+// enables fakes for test scenarios
+type confidentialClient interface {
+ AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireTokenSilentOption) (confidential.AuthResult, error)
+ AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireTokenByAuthCodeOption) (confidential.AuthResult, error)
+ AcquireTokenByCredential(ctx context.Context, scopes []string) (confidential.AuthResult, error)
+}
+
+// enables fakes for test scenarios
+type publicClient interface {
+ AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireTokenSilentOption) (public.AuthResult, error)
+ AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string) (public.AuthResult, error)
+ AcquireTokenByDeviceCode(ctx context.Context, scopes []string) (public.DeviceCode, error)
+ AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...public.AcquireTokenByAuthCodeOption) (public.AuthResult, error)
+ AcquireTokenInteractive(ctx context.Context, scopes []string, options ...public.InteractiveAuthOption) (public.AuthResult, error)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
new file mode 100644
index 00000000000..68f46d51a1e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
@@ -0,0 +1,189 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "regexp"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+)
+
+const credNameAzureCLI = "AzureCLICredential"
+
+// used by tests to fake invoking the CLI
+type azureCLITokenProvider func(ctx context.Context, resource string, tenantID string) ([]byte, error)
+
+// AzureCLICredentialOptions contains optional parameters for AzureCLICredential.
+type AzureCLICredentialOptions struct {
+ // TenantID identifies the tenant the credential should authenticate in.
+ // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user.
+ TenantID string
+
+ tokenProvider azureCLITokenProvider
+}
+
+// init returns an instance of AzureCLICredentialOptions initialized with default values.
+func (o *AzureCLICredentialOptions) init() {
+ if o.tokenProvider == nil {
+ o.tokenProvider = defaultTokenProvider()
+ }
+}
+
+// AzureCLICredential authenticates as the identity logged in to the Azure CLI.
+type AzureCLICredential struct {
+ tokenProvider azureCLITokenProvider
+ tenantID string
+}
+
+// NewAzureCLICredential constructs an AzureCLICredential. Pass nil to accept default options.
+func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredential, error) {
+ cp := AzureCLICredentialOptions{}
+ if options != nil {
+ cp = *options
+ }
+ cp.init()
+ return &AzureCLICredential{
+ tokenProvider: cp.tokenProvider,
+ tenantID: cp.TenantID,
+ }, nil
+}
+
+// GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI.
+// This method is called automatically by Azure SDK clients.
+func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ if len(opts.Scopes) != 1 {
+ return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope")
+ }
+ // CLI expects an AAD v1 resource, not a v2 scope
+ scope := strings.TrimSuffix(opts.Scopes[0], defaultSuffix)
+ at, err := c.authenticate(ctx, scope)
+ if err != nil {
+ return azcore.AccessToken{}, err
+ }
+ logGetTokenSuccess(c, opts)
+ return at, nil
+}
+
+const timeoutCLIRequest = 10 * time.Second
+
+func (c *AzureCLICredential) authenticate(ctx context.Context, resource string) (azcore.AccessToken, error) {
+ output, err := c.tokenProvider(ctx, resource, c.tenantID)
+ if err != nil {
+ return azcore.AccessToken{}, err
+ }
+
+ return c.createAccessToken(output)
+}
+
+// defaultTokenProvider returns the production azureCLITokenProvider: a function
+// that shells out to "az account get-access-token" and returns its raw JSON
+// output. Returned errors are credentialUnavailableError so chained credentials
+// can fall through to the next source.
+func defaultTokenProvider() func(ctx context.Context, resource string, tenantID string) ([]byte, error) {
+	return func(ctx context.Context, resource string, tenantID string) ([]byte, error) {
+		// The resource is interpolated into a shell command line below, so
+		// restrict it to a safe character set before using it.
+		match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource)
+		if err != nil {
+			return nil, err
+		}
+		if !match {
+			// NOTE: the listed characters match the regexp above (":" rather than ";").
+			return nil, fmt.Errorf(`%s: unexpected scope "%s". Only alphanumeric characters and ".", ":", "-", and "/" are allowed`, credNameAzureCLI, resource)
+		}
+
+		// Bound the CLI invocation so a hung "az" process can't block GetToken forever.
+		ctx, cancel := context.WithTimeout(ctx, timeoutCLIRequest)
+		defer cancel()
+
+		commandLine := "az account get-access-token -o json --resource " + resource
+		if tenantID != "" {
+			commandLine += " --tenant " + tenantID
+		}
+		var cliCmd *exec.Cmd
+		if runtime.GOOS == "windows" {
+			dir := os.Getenv("SYSTEMROOT")
+			if dir == "" {
+				return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value")
+			}
+			cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine)
+			cliCmd.Dir = dir
+		} else {
+			cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine)
+			cliCmd.Dir = "/bin"
+		}
+		cliCmd.Env = os.Environ()
+		var stderr bytes.Buffer
+		cliCmd.Stderr = &stderr
+
+		output, err := cliCmd.Output()
+		if err != nil {
+			msg := stderr.String()
+			var exErr *exec.ExitError
+			// Exit code 127 (command not found) or the Windows cmd.exe message
+			// both mean the CLI isn't installed/on PATH.
+			if (errors.As(err, &exErr) && exErr.ExitCode() == 127) || strings.HasPrefix(msg, "'az' is not recognized") {
+				msg = "Azure CLI not found on path"
+			}
+			if msg == "" {
+				msg = err.Error()
+			}
+			return nil, newCredentialUnavailableError(credNameAzureCLI, msg)
+		}
+
+		return output, nil
+	}
+}
+
+func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
+ t := struct {
+ AccessToken string `json:"accessToken"`
+ Authority string `json:"_authority"`
+ ClientID string `json:"_clientId"`
+ ExpiresOn string `json:"expiresOn"`
+ IdentityProvider string `json:"identityProvider"`
+ IsMRRT bool `json:"isMRRT"`
+ RefreshToken string `json:"refreshToken"`
+ Resource string `json:"resource"`
+ TokenType string `json:"tokenType"`
+ UserID string `json:"userId"`
+ }{}
+ err := json.Unmarshal(tk, &t)
+ if err != nil {
+ return azcore.AccessToken{}, err
+ }
+
+ tokenExpirationDate, err := parseExpirationDate(t.ExpiresOn)
+ if err != nil {
+ return azcore.AccessToken{}, fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err)
+ }
+
+ converted := azcore.AccessToken{
+ Token: t.AccessToken,
+ ExpiresOn: *tokenExpirationDate,
+ }
+ return converted, nil
+}
+
+// parseExpirationDate parses either an Azure CLI or CloudShell date into a time object.
+// It tries RFC 3339 first (CloudShell), then the Azure CLI's local-time format.
+func parseExpirationDate(input string) (*time.Time, error) {
+	// CloudShell (and potentially the Azure CLI in future)
+	expirationDate, cloudShellErr := time.Parse(time.RFC3339, input)
+	if cloudShellErr != nil {
+		// Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone)
+		const cliFormat = "2006-01-02 15:04:05.999999"
+		expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local)
+		if cliErr != nil {
+			return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr)
+		}
+		return &expirationDate, nil
+	}
+	return &expirationDate, nil
+}
+
+var _ azcore.TokenCredential = (*AzureCLICredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
new file mode 100644
index 00000000000..86a89064569
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
@@ -0,0 +1,133 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+// ChainedTokenCredentialOptions contains optional parameters for ChainedTokenCredential.
+type ChainedTokenCredentialOptions struct {
+	// RetrySources configures how the credential uses its sources. When true, the credential always attempts to
+	// authenticate through each source in turn, stopping when one succeeds. When false, the credential authenticates
+	// only through the first successful source--it never again tries the sources which failed.
+	RetrySources bool
+}
+
+// ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default,
+// it tries all the credentials until one authenticates, after which it always uses that credential.
+type ChainedTokenCredential struct {
+ cond *sync.Cond
+ iterating bool
+ name string
+ retrySources bool
+ sources []azcore.TokenCredential
+ successfulCredential azcore.TokenCredential
+}
+
+// NewChainedTokenCredential creates a ChainedTokenCredential. Pass nil for options to accept defaults.
+func NewChainedTokenCredential(sources []azcore.TokenCredential, options *ChainedTokenCredentialOptions) (*ChainedTokenCredential, error) {
+ if len(sources) == 0 {
+ return nil, errors.New("sources must contain at least one TokenCredential")
+ }
+ for _, source := range sources {
+ if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil
+ return nil, errors.New("sources cannot contain nil")
+ }
+ }
+ cp := make([]azcore.TokenCredential, len(sources))
+ copy(cp, sources)
+ if options == nil {
+ options = &ChainedTokenCredentialOptions{}
+ }
+ return &ChainedTokenCredential{
+ cond: sync.NewCond(&sync.Mutex{}),
+ name: "ChainedTokenCredential",
+ retrySources: options.RetrySources,
+ sources: cp,
+ }, nil
+}
+
+// GetToken calls GetToken on the chained credentials in turn, stopping when one returns a token.
+// This method is called automatically by Azure SDK clients.
+func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ if !c.retrySources {
+ // ensure only one goroutine at a time iterates the sources and perhaps sets c.successfulCredential
+ c.cond.L.Lock()
+ for {
+ if c.successfulCredential != nil {
+ c.cond.L.Unlock()
+ return c.successfulCredential.GetToken(ctx, opts)
+ }
+ if !c.iterating {
+ c.iterating = true
+ // allow other goroutines to wait while this one iterates
+ c.cond.L.Unlock()
+ break
+ }
+ c.cond.Wait()
+ }
+ }
+
+ var err error
+ var errs []error
+ var token azcore.AccessToken
+ var successfulCredential azcore.TokenCredential
+ for _, cred := range c.sources {
+ token, err = cred.GetToken(ctx, opts)
+ if err == nil {
+ log.Writef(EventAuthentication, "%s authenticated with %s", c.name, extractCredentialName(cred))
+ successfulCredential = cred
+ break
+ }
+ errs = append(errs, err)
+ if _, ok := err.(*credentialUnavailableError); !ok {
+ break
+ }
+ }
+ if c.iterating {
+ c.cond.L.Lock()
+ c.successfulCredential = successfulCredential
+ c.iterating = false
+ c.cond.L.Unlock()
+ c.cond.Broadcast()
+ }
+ // err is the error returned by the last GetToken call. It will be nil when that call succeeds
+ if err != nil {
+ // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise
+ msg := createChainedErrorMessage(errs)
+ if _, ok := err.(*credentialUnavailableError); ok {
+ err = newCredentialUnavailableError(c.name, msg)
+ } else {
+ res := getResponseFromError(err)
+ err = newAuthenticationFailedError(c.name, msg, res)
+ }
+ }
+ return token, err
+}
+
+func createChainedErrorMessage(errs []error) string {
+ msg := "failed to acquire a token.\nAttempted credentials:"
+ for _, err := range errs {
+ msg += fmt.Sprintf("\n\t%s", err.Error())
+ }
+ return msg
+}
+
+func extractCredentialName(credential azcore.TokenCredential) string {
+ return strings.TrimPrefix(fmt.Sprintf("%T", credential), "*azidentity.")
+}
+
+var _ azcore.TokenCredential = (*ChainedTokenCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
new file mode 100644
index 00000000000..3b443e8eedb
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
@@ -0,0 +1,47 @@
+# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file.
+trigger:
+ branches:
+ include:
+ - main
+ - feature/*
+ - hotfix/*
+ - release/*
+ paths:
+ include:
+ - sdk/azidentity/
+
+pr:
+ branches:
+ include:
+ - main
+ - feature/*
+ - hotfix/*
+ - release/*
+ paths:
+ include:
+ - sdk/azidentity/
+
+stages:
+- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
+ parameters:
+ RunLiveTests: true
+ ServiceDirectory: 'azidentity'
+ PreSteps:
+ - pwsh: |
+ [System.Convert]::FromBase64String($env:PFX_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/test.pfx -AsByteStream
+ Set-Content -Path $(Agent.TempDirectory)/test.pem -Value $env:PEM_CONTENTS
+ [System.Convert]::FromBase64String($env:SNI_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/testsni.pfx -AsByteStream
+ env:
+ PFX_CONTENTS: $(net-identity-spcert-pfx)
+ PEM_CONTENTS: $(net-identity-spcert-pem)
+ SNI_CONTENTS: $(net-identity-spcert-sni)
+ EnvVars:
+ AZURE_IDENTITY_TEST_TENANTID: $(net-identity-tenantid)
+ AZURE_IDENTITY_TEST_USERNAME: $(net-identity-username)
+ AZURE_IDENTITY_TEST_PASSWORD: $(net-identity-password)
+ IDENTITY_SP_TENANT_ID: $(net-identity-sp-tenantid)
+ IDENTITY_SP_CLIENT_ID: $(net-identity-sp-clientid)
+ IDENTITY_SP_CLIENT_SECRET: $(net-identity-sp-clientsecret)
+ IDENTITY_SP_CERT_PEM: $(Agent.TempDirectory)/test.pem
+ IDENTITY_SP_CERT_PFX: $(Agent.TempDirectory)/test.pfx
+ IDENTITY_SP_CERT_SNI: $(Agent.TempDirectory)/testsni.pfx
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
new file mode 100644
index 00000000000..e50157b104d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
@@ -0,0 +1,217 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "context"
+ "crypto"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "errors"
+ "os"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
+ "golang.org/x/crypto/pkcs12"
+)
+
+const credNameCert = "ClientCertificateCredential"
+
+// ClientCertificateCredentialOptions contains optional parameters for ClientCertificateCredential.
+type ClientCertificateCredentialOptions struct {
+ azcore.ClientOptions
+
+ // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c
+ // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication.
+	// Defaults to false.
+ SendCertificateChain bool
+}
+
+// ClientCertificateCredential authenticates a service principal with a certificate.
+type ClientCertificateCredential struct {
+ client confidentialClient
+}
+
+// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults.
+func NewClientCertificateCredential(tenantID string, clientID string, certs []*x509.Certificate, key crypto.PrivateKey, options *ClientCertificateCredentialOptions) (*ClientCertificateCredential, error) {
+ if len(certs) == 0 {
+ return nil, errors.New("at least one certificate is required")
+ }
+ pk, ok := key.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("'key' must be an *rsa.PrivateKey")
+ }
+ if !validTenantID(tenantID) {
+ return nil, errors.New(tenantIDValidationErr)
+ }
+ if options == nil {
+ options = &ClientCertificateCredentialOptions{}
+ }
+ authorityHost, err := setAuthorityHost(options.Cloud)
+ if err != nil {
+ return nil, err
+ }
+ cert, err := newCertContents(certs, pk, options.SendCertificateChain)
+ if err != nil {
+ return nil, err
+ }
+ cred := confidential.NewCredFromCert(cert.c, key) // TODO: NewCredFromCert should take a slice
+ if err != nil {
+ return nil, err
+ }
+ o := []confidential.Option{
+ confidential.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)),
+ confidential.WithHTTPClient(newPipelineAdapter(&options.ClientOptions)),
+ confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)),
+ }
+ if options.SendCertificateChain {
+ o = append(o, confidential.WithX5C())
+ }
+ c, err := confidential.New(clientID, cred, o...)
+ if err != nil {
+ return nil, err
+ }
+ return &ClientCertificateCredential{client: c}, nil
+}
+
+// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ if len(opts.Scopes) == 0 {
+ return azcore.AccessToken{}, errors.New(credNameCert + ": GetToken() requires at least one scope")
+ }
+ ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes)
+ if err == nil {
+ logGetTokenSuccess(c, opts)
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+ }
+
+ ar, err = c.client.AcquireTokenByCredential(ctx, opts.Scopes)
+ if err != nil {
+ return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameCert, err)
+ }
+ logGetTokenSuccess(c, opts)
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+}
+
+// ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential.
+// Pass nil for password if the private key isn't encrypted. This function can't decrypt keys in PEM format.
+func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, crypto.PrivateKey, error) {
+ var blocks []*pem.Block
+ var err error
+ if len(password) == 0 {
+ blocks, err = loadPEMCert(certData)
+ }
+ if len(blocks) == 0 || err != nil {
+ blocks, err = loadPKCS12Cert(certData, string(password))
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ var certs []*x509.Certificate
+ var pk crypto.PrivateKey
+ for _, block := range blocks {
+ switch block.Type {
+ case "CERTIFICATE":
+ c, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ certs = append(certs, c)
+ case "PRIVATE KEY":
+ if pk != nil {
+ return nil, nil, errors.New("certData contains multiple private keys")
+ }
+ pk, err = x509.ParsePKCS8PrivateKey(block.Bytes)
+ if err != nil {
+ pk, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ case "RSA PRIVATE KEY":
+ if pk != nil {
+ return nil, nil, errors.New("certData contains multiple private keys")
+ }
+ pk, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+ if len(certs) == 0 {
+ return nil, nil, errors.New("found no certificate")
+ }
+ if pk == nil {
+ return nil, nil, errors.New("found no private key")
+ }
+ return certs, pk, nil
+}
+
+type certContents struct {
+ c *x509.Certificate // the signing cert
+ fp []byte // the signing cert's fingerprint, a SHA-1 digest
+ pk *rsa.PrivateKey // the signing key
+ x5c []string // concatenation of every provided cert, base64 encoded
+}
+
+func newCertContents(certs []*x509.Certificate, key *rsa.PrivateKey, sendCertificateChain bool) (*certContents, error) {
+ cc := certContents{pk: key}
+	// need the signing cert's fingerprint: identify that cert by matching its public key to the private key
+ for _, cert := range certs {
+ certKey, ok := cert.PublicKey.(*rsa.PublicKey)
+ if ok && key.E == certKey.E && key.N.Cmp(certKey.N) == 0 {
+ fp := sha1.Sum(cert.Raw)
+ cc.fp = fp[:]
+ cc.c = cert
+ if sendCertificateChain {
+ // signing cert must be first in x5c
+ cc.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cc.x5c...)
+ }
+ } else if sendCertificateChain {
+ cc.x5c = append(cc.x5c, base64.StdEncoding.EncodeToString(cert.Raw))
+ }
+ }
+ if len(cc.fp) == 0 || cc.c == nil {
+ return nil, errors.New("found no certificate matching 'key'")
+ }
+ return &cc, nil
+}
+
+func loadPEMCert(certData []byte) ([]*pem.Block, error) {
+ blocks := []*pem.Block{}
+ for {
+ var block *pem.Block
+ block, certData = pem.Decode(certData)
+ if block == nil {
+ break
+ }
+ blocks = append(blocks, block)
+ }
+ if len(blocks) == 0 {
+ return nil, errors.New("didn't find any PEM blocks")
+ }
+ return blocks, nil
+}
+
+func loadPKCS12Cert(certData []byte, password string) ([]*pem.Block, error) {
+ blocks, err := pkcs12.ToPEM(certData, password)
+ if err != nil {
+ return nil, err
+ }
+ if len(blocks) == 0 {
+ // not mentioning PKCS12 in this message because we end up here when certData is garbage
+ return nil, errors.New("didn't find any certificate content")
+ }
+ return blocks, err
+}
+
+var _ azcore.TokenCredential = (*ClientCertificateCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
new file mode 100644
index 00000000000..6ecb8f4db81
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
@@ -0,0 +1,78 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "context"
+ "errors"
+ "os"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
+)
+
+const credNameSecret = "ClientSecretCredential"
+
+// ClientSecretCredentialOptions contains optional parameters for ClientSecretCredential.
+type ClientSecretCredentialOptions struct {
+ azcore.ClientOptions
+}
+
+// ClientSecretCredential authenticates an application with a client secret.
+type ClientSecretCredential struct {
+ client confidentialClient
+}
+
+// NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults.
+func NewClientSecretCredential(tenantID string, clientID string, clientSecret string, options *ClientSecretCredentialOptions) (*ClientSecretCredential, error) {
+ if !validTenantID(tenantID) {
+ return nil, errors.New(tenantIDValidationErr)
+ }
+ if options == nil {
+ options = &ClientSecretCredentialOptions{}
+ }
+ authorityHost, err := setAuthorityHost(options.Cloud)
+ if err != nil {
+ return nil, err
+ }
+ cred, err := confidential.NewCredFromSecret(clientSecret)
+ if err != nil {
+ return nil, err
+ }
+ c, err := confidential.New(clientID, cred,
+ confidential.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)),
+ confidential.WithHTTPClient(newPipelineAdapter(&options.ClientOptions)),
+ confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)),
+ )
+ if err != nil {
+ return nil, err
+ }
+ return &ClientSecretCredential{client: c}, nil
+}
+
+// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ if len(opts.Scopes) == 0 {
+ return azcore.AccessToken{}, errors.New(credNameSecret + ": GetToken() requires at least one scope")
+ }
+ ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes)
+ if err == nil {
+ logGetTokenSuccess(c, opts)
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+ }
+
+ ar, err = c.client.AcquireTokenByCredential(ctx, opts.Scopes)
+ if err != nil {
+ return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameSecret, err)
+ }
+ logGetTokenSuccess(c, opts)
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+}
+
+var _ azcore.TokenCredential = (*ClientSecretCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
new file mode 100644
index 00000000000..7358558acb5
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
@@ -0,0 +1,132 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "context"
+ "errors"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+// DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential.
+// These options may not apply to all credentials in the chain.
+type DefaultAzureCredentialOptions struct {
+ azcore.ClientOptions
+
+ // TenantID identifies the tenant the Azure CLI should authenticate in.
+ // Defaults to the CLI's default tenant, which is typically the home tenant of the user logged in to the CLI.
+ TenantID string
+}
+
+// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure.
+// It combines credentials suitable for deployment with credentials suitable for local development.
+// It attempts to authenticate with each of these credential types, in the following order, stopping when one provides a token:
+// EnvironmentCredential
+// ManagedIdentityCredential
+// AzureCLICredential
+// Consult the documentation for these credential types for more information on how they authenticate.
+// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for
+// every subsequent authentication.
+type DefaultAzureCredential struct {
+ chain *ChainedTokenCredential
+}
+
+// NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults.
+func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) {
+ var creds []azcore.TokenCredential
+ var errorMessages []string
+
+ if options == nil {
+ options = &DefaultAzureCredentialOptions{}
+ }
+
+ envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{ClientOptions: options.ClientOptions})
+ if err == nil {
+ creds = append(creds, envCred)
+ } else {
+ errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err})
+ }
+
+ o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions}
+ if ID, ok := os.LookupEnv(azureClientID); ok {
+ o.ID = ClientID(ID)
+ }
+ msiCred, err := NewManagedIdentityCredential(o)
+ if err == nil {
+ creds = append(creds, msiCred)
+ msiCred.client.imdsTimeout = time.Second
+ } else {
+ errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err})
+ }
+
+ cliCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{TenantID: options.TenantID})
+ if err == nil {
+ creds = append(creds, cliCred)
+ } else {
+ errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err})
+ }
+
+ err = defaultAzureCredentialConstructorErrorHandler(len(creds), errorMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ chain, err := NewChainedTokenCredential(creds, nil)
+ if err != nil {
+ return nil, err
+ }
+ chain.name = "DefaultAzureCredential"
+ return &DefaultAzureCredential{chain: chain}, nil
+}
+
+// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ return c.chain.GetToken(ctx, opts)
+}
+
+var _ azcore.TokenCredential = (*DefaultAzureCredential)(nil)
+
+func defaultAzureCredentialConstructorErrorHandler(numberOfSuccessfulCredentials int, errorMessages []string) (err error) {
+ errorMessage := strings.Join(errorMessages, "\n\t")
+
+ if numberOfSuccessfulCredentials == 0 {
+ return errors.New(errorMessage)
+ }
+
+ if len(errorMessages) != 0 {
+ log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", errorMessage)
+ }
+
+ return nil
+}
+
+// defaultCredentialErrorReporter is a substitute for credentials that couldn't be constructed.
+// Its GetToken method always returns a credentialUnavailableError having the same message as
+// the error that prevented constructing the credential. This ensures the message is present
+// in the error returned by ChainedTokenCredential.GetToken()
+type defaultCredentialErrorReporter struct {
+ credType string
+ err error
+}
+
+func (d *defaultCredentialErrorReporter) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ if _, ok := d.err.(*credentialUnavailableError); ok {
+ return azcore.AccessToken{}, d.err
+ }
+ return azcore.AccessToken{}, newCredentialUnavailableError(d.credType, d.err.Error())
+}
+
+var _ azcore.TokenCredential = (*defaultCredentialErrorReporter)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
new file mode 100644
index 00000000000..d0c72c34854
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
@@ -0,0 +1,130 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
+)
+
+const credNameDeviceCode = "DeviceCodeCredential"
+
+// DeviceCodeCredentialOptions contains optional parameters for DeviceCodeCredential.
+type DeviceCodeCredentialOptions struct {
+ azcore.ClientOptions
+
+ // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the
+ // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant
+ // applications.
+ TenantID string
+ // ClientID is the ID of the application users will authenticate to.
+ // Defaults to the ID of an Azure development application.
+ ClientID string
+ // UserPrompt controls how the credential presents authentication instructions. The credential calls
+ // this function with authentication details when it receives a device code. By default, the credential
+ // prints these details to stdout.
+ UserPrompt func(context.Context, DeviceCodeMessage) error
+}
+
+func (o *DeviceCodeCredentialOptions) init() {
+ if o.TenantID == "" {
+ o.TenantID = organizationsTenantID
+ }
+ if o.ClientID == "" {
+ o.ClientID = developerSignOnClientID
+ }
+ if o.UserPrompt == nil {
+ o.UserPrompt = func(ctx context.Context, dc DeviceCodeMessage) error {
+ fmt.Println(dc.Message)
+ return nil
+ }
+ }
+}
+
+// DeviceCodeMessage contains the information a user needs to complete authentication.
+type DeviceCodeMessage struct {
+ // UserCode is the user code returned by the service.
+ UserCode string `json:"user_code"`
+ // VerificationURL is the URL at which the user must authenticate.
+ VerificationURL string `json:"verification_uri"`
+ // Message is user instruction from Azure Active Directory.
+ Message string `json:"message"`
+}
+
+// DeviceCodeCredential acquires tokens for a user via the device code flow, which has the
+// user browse to an Azure Active Directory URL, enter a code, and authenticate. It's useful
+// for authenticating a user in an environment without a web browser, such as an SSH session.
+// If a web browser is available, InteractiveBrowserCredential is more convenient because it
+// automatically opens a browser to the login page.
+type DeviceCodeCredential struct {
+ client publicClient
+ userPrompt func(context.Context, DeviceCodeMessage) error
+ account public.Account
+}
+
+// NewDeviceCodeCredential creates a DeviceCodeCredential. Pass nil to accept default options.
+func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeCredential, error) {
+ cp := DeviceCodeCredentialOptions{}
+ if options != nil {
+ cp = *options
+ }
+ cp.init()
+ if !validTenantID(cp.TenantID) {
+ return nil, errors.New(tenantIDValidationErr)
+ }
+ authorityHost, err := setAuthorityHost(cp.Cloud)
+ if err != nil {
+ return nil, err
+ }
+ c, err := public.New(cp.ClientID,
+ public.WithAuthority(runtime.JoinPaths(authorityHost, cp.TenantID)),
+ public.WithHTTPClient(newPipelineAdapter(&cp.ClientOptions)),
+ )
+ if err != nil {
+ return nil, err
+ }
+ return &DeviceCodeCredential{userPrompt: cp.UserPrompt, client: c}, nil
+}
+
+// GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication.
+// This method is called automatically by Azure SDK clients.
+func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ if len(opts.Scopes) == 0 {
+ return azcore.AccessToken{}, errors.New(credNameDeviceCode + ": GetToken() requires at least one scope")
+ }
+ ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, public.WithSilentAccount(c.account))
+ if err == nil {
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+ }
+ dc, err := c.client.AcquireTokenByDeviceCode(ctx, opts.Scopes)
+ if err != nil {
+ return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameDeviceCode, err)
+ }
+ err = c.userPrompt(ctx, DeviceCodeMessage{
+ UserCode: dc.Result.UserCode,
+ VerificationURL: dc.Result.VerificationURL,
+ Message: dc.Result.Message,
+ })
+ if err != nil {
+ return azcore.AccessToken{}, err
+ }
+ ar, err = dc.AuthenticationResult(ctx)
+ if err != nil {
+ return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameDeviceCode, err)
+ }
+ c.account = ar.Account
+ logGetTokenSuccess(c, opts)
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+}
+
+var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
new file mode 100644
index 00000000000..16c595d1d37
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
@@ -0,0 +1,122 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+const envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN"
+
+// EnvironmentCredentialOptions contains optional parameters for EnvironmentCredential
+type EnvironmentCredentialOptions struct {
+ azcore.ClientOptions
+}
+
+// EnvironmentCredential authenticates a service principal with a secret or certificate, or a user with a password, depending
+// on environment variable configuration. It reads configuration from these variables, in the following order:
+//
+// Service principal with client secret
+//
+// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID.
+//
+// AZURE_CLIENT_ID: the service principal's client ID
+//
+// AZURE_CLIENT_SECRET: one of the service principal's client secrets
+//
+// Service principal with certificate
+//
+// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID.
+//
+// AZURE_CLIENT_ID: the service principal's client ID
+//
+// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the unencrypted private key.
+//
+// User with username and password
+//
+// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations".
+//
+// AZURE_CLIENT_ID: client ID of the application the user will authenticate to
+//
+// AZURE_USERNAME: a username (usually an email address)
+//
+// AZURE_PASSWORD: the user's password
+type EnvironmentCredential struct {
+ cred azcore.TokenCredential
+}
+
+// NewEnvironmentCredential creates an EnvironmentCredential. Pass nil to accept default options.
+func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*EnvironmentCredential, error) {
+ if options == nil {
+ options = &EnvironmentCredentialOptions{}
+ }
+ tenantID := os.Getenv("AZURE_TENANT_ID")
+ if tenantID == "" {
+ return nil, errors.New("missing environment variable AZURE_TENANT_ID")
+ }
+ clientID := os.Getenv(azureClientID)
+ if clientID == "" {
+ return nil, errors.New("missing environment variable " + azureClientID)
+ }
+ if clientSecret := os.Getenv("AZURE_CLIENT_SECRET"); clientSecret != "" {
+ log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientSecretCredential")
+ o := &ClientSecretCredentialOptions{ClientOptions: options.ClientOptions}
+ cred, err := NewClientSecretCredential(tenantID, clientID, clientSecret, o)
+ if err != nil {
+ return nil, err
+ }
+ return &EnvironmentCredential{cred: cred}, nil
+ }
+ if certPath := os.Getenv("AZURE_CLIENT_CERTIFICATE_PATH"); certPath != "" {
+ log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientCertificateCredential")
+ certData, err := os.ReadFile(certPath)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to read certificate file "%s": %v`, certPath, err)
+ }
+ certs, key, err := ParseCertificates(certData, nil)
+ if err != nil {
+ return nil, fmt.Errorf(`failed to load certificate from "%s": %v`, certPath, err)
+ }
+ o := &ClientCertificateCredentialOptions{ClientOptions: options.ClientOptions}
+ if v, ok := os.LookupEnv(envVarSendCertChain); ok {
+ o.SendCertificateChain = v == "1" || strings.ToLower(v) == "true"
+ }
+ cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, o)
+ if err != nil {
+ return nil, err
+ }
+ return &EnvironmentCredential{cred: cred}, nil
+ }
+ if username := os.Getenv("AZURE_USERNAME"); username != "" {
+ if password := os.Getenv("AZURE_PASSWORD"); password != "" {
+ log.Write(EventAuthentication, "EnvironmentCredential will authenticate with UsernamePasswordCredential")
+ o := &UsernamePasswordCredentialOptions{ClientOptions: options.ClientOptions}
+ cred, err := NewUsernamePasswordCredential(tenantID, clientID, username, password, o)
+ if err != nil {
+ return nil, err
+ }
+ return &EnvironmentCredential{cred: cred}, nil
+ }
+ return nil, errors.New("no value for AZURE_PASSWORD")
+ }
+ return nil, errors.New("incomplete environment variable configuration. Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set")
+}
+
+// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ return c.cred.GetToken(ctx, opts)
+}
+
+var _ azcore.TokenCredential = (*EnvironmentCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
new file mode 100644
index 00000000000..c60d13d0071
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
@@ -0,0 +1,108 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
+ msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+)
+
+// getResponseFromError retrieves the response carried by
+// an AuthenticationFailedError or MSAL CallErr, if any
+func getResponseFromError(err error) *http.Response {
+ var a *AuthenticationFailedError
+ var c msal.CallErr
+ var res *http.Response
+ if errors.As(err, &c) {
+ res = c.Resp
+ } else if errors.As(err, &a) {
+ res = a.RawResponse
+ }
+ return res
+}
+
+// AuthenticationFailedError indicates an authentication request has failed.
+type AuthenticationFailedError struct {
+ // RawResponse is the HTTP response motivating the error, if available.
+ RawResponse *http.Response
+
+ credType string
+ message string
+}
+
+func newAuthenticationFailedError(credType string, message string, resp *http.Response) error {
+ return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp}
+}
+
+func newAuthenticationFailedErrorFromMSALError(credType string, err error) error {
+ res := getResponseFromError(err)
+ return newAuthenticationFailedError(credType, err.Error(), res)
+}
+
+// Error implements the error interface. Note that the message contents are not contractual and can change over time.
+func (e *AuthenticationFailedError) Error() string {
+ if e.RawResponse == nil {
+ return e.credType + ": " + e.message
+ }
+ msg := &bytes.Buffer{}
+ fmt.Fprintf(msg, e.credType+" authentication failed\n")
+ fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path)
+ fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
+ fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status)
+ fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
+ body, err := io.ReadAll(e.RawResponse.Body)
+ e.RawResponse.Body.Close()
+ if err != nil {
+ fmt.Fprintf(msg, "Error reading response body: %v", err)
+ } else if len(body) > 0 {
+ e.RawResponse.Body = io.NopCloser(bytes.NewReader(body))
+ if err := json.Indent(msg, body, "", " "); err != nil {
+ // failed to pretty-print so just dump it verbatim
+ fmt.Fprint(msg, string(body))
+ }
+ } else {
+ fmt.Fprint(msg, "Response contained no body")
+ }
+ fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------")
+ return msg.String()
+}
+
+// NonRetriable indicates the request which provoked this error shouldn't be retried.
+func (*AuthenticationFailedError) NonRetriable() {
+ // marker method
+}
+
+var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil)
+
+// credentialUnavailableError indicates a credential can't attempt
+// authentication because it lacks required data or state.
+type credentialUnavailableError struct {
+ credType string
+ message string
+}
+
+func newCredentialUnavailableError(credType, message string) error {
+ return &credentialUnavailableError{credType: credType, message: message}
+}
+
+func (e *credentialUnavailableError) Error() string {
+ return e.credType + ": " + e.message
+}
+
+// NonRetriable indicates that this error should not be retried.
+func (e *credentialUnavailableError) NonRetriable() {
+ // marker method
+}
+
+var _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
new file mode 100644
index 00000000000..e4aaf45b6dd
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
@@ -0,0 +1,100 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "context"
+ "errors"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
+)
+
+const credNameBrowser = "InteractiveBrowserCredential"
+
+// InteractiveBrowserCredentialOptions contains optional parameters for InteractiveBrowserCredential.
+type InteractiveBrowserCredentialOptions struct {
+ azcore.ClientOptions
+
+ // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the
+ // "organizations" tenant, which can authenticate work and school accounts.
+ TenantID string
+ // ClientID is the ID of the application users will authenticate to.
+ // Defaults to the ID of an Azure development application.
+ ClientID string
+ // RedirectURL will be supported in a future version but presently doesn't work: https://github.com/Azure/azure-sdk-for-go/issues/15632.
+ // Applications which have "http://localhost" registered as a redirect URL need not set this option.
+ RedirectURL string
+}
+
+func (o *InteractiveBrowserCredentialOptions) init() {
+ if o.TenantID == "" {
+ o.TenantID = organizationsTenantID
+ }
+ if o.ClientID == "" {
+ o.ClientID = developerSignOnClientID
+ }
+}
+
+// InteractiveBrowserCredential opens a browser to interactively authenticate a user.
+type InteractiveBrowserCredential struct {
+ client publicClient
+ options InteractiveBrowserCredentialOptions
+ account public.Account
+}
+
+// NewInteractiveBrowserCredential constructs a new InteractiveBrowserCredential. Pass nil to accept default options.
+func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOptions) (*InteractiveBrowserCredential, error) {
+ cp := InteractiveBrowserCredentialOptions{}
+ if options != nil {
+ cp = *options
+ }
+ cp.init()
+ if !validTenantID(cp.TenantID) {
+ return nil, errors.New(tenantIDValidationErr)
+ }
+ authorityHost, err := setAuthorityHost(cp.Cloud)
+ if err != nil {
+ return nil, err
+ }
+ c, err := public.New(cp.ClientID,
+ public.WithAuthority(runtime.JoinPaths(authorityHost, cp.TenantID)),
+ public.WithHTTPClient(newPipelineAdapter(&cp.ClientOptions)),
+ )
+ if err != nil {
+ return nil, err
+ }
+ return &InteractiveBrowserCredential{options: cp, client: c}, nil
+}
+
+// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ if len(opts.Scopes) == 0 {
+ return azcore.AccessToken{}, errors.New(credNameBrowser + ": GetToken() requires at least one scope")
+ }
+ ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, public.WithSilentAccount(c.account))
+ if err == nil {
+ logGetTokenSuccess(c, opts)
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+ }
+
+ o := []public.InteractiveAuthOption{}
+ if c.options.RedirectURL != "" {
+ o = append(o, public.WithRedirectURI(c.options.RedirectURL))
+ }
+ ar, err = c.client.AcquireTokenInteractive(ctx, opts.Scopes, o...)
+ if err != nil {
+ return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameBrowser, err)
+ }
+ c.account = ar.Account
+ logGetTokenSuccess(c, opts)
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+}
+
+var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go
new file mode 100644
index 00000000000..569453e4622
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go
@@ -0,0 +1,30 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+// EventAuthentication entries contain information about authentication.
+// This includes information like the names of environment variables
+// used when obtaining credentials and the type of credential used.
+const EventAuthentication log.Event = "Authentication"
+
+func logGetTokenSuccess(cred azcore.TokenCredential, opts policy.TokenRequestOptions) {
+ if !log.Should(EventAuthentication) {
+ return
+ }
+ scope := strings.Join(opts.Scopes, ", ")
+ msg := fmt.Sprintf("%T.GetToken() acquired a token for scope %s\n", cred, scope)
+ log.Write(EventAuthentication, msg)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
new file mode 100644
index 00000000000..ce6e1e61474
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
@@ -0,0 +1,393 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
+)
+
+const (
+ arcIMDSEndpoint = "IMDS_ENDPOINT"
+ identityEndpoint = "IDENTITY_ENDPOINT"
+ identityHeader = "IDENTITY_HEADER"
+ identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT"
+ headerMetadata = "Metadata"
+ imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+ msiEndpoint = "MSI_ENDPOINT"
+ imdsAPIVersion = "2018-02-01"
+ azureArcAPIVersion = "2019-08-15"
+ serviceFabricAPIVersion = "2019-07-01-preview"
+
+ qpClientID = "client_id"
+ qpResID = "mi_res_id"
+)
+
+type msiType int
+
+const (
+ msiTypeAppService msiType = iota
+ msiTypeAzureArc
+ msiTypeCloudShell
+ msiTypeIMDS
+ msiTypeServiceFabric
+)
+
+// managedIdentityClient provides the base for authenticating in managed identity environments
+// This type includes a runtime.Pipeline and TokenCredentialOptions.
+type managedIdentityClient struct {
+ pipeline runtime.Pipeline
+ msiType msiType
+ endpoint string
+ id ManagedIDKind
+ imdsTimeout time.Duration
+}
+
+type wrappedNumber json.Number
+
+func (n *wrappedNumber) UnmarshalJSON(b []byte) error {
+ c := string(b)
+ if c == "\"\"" {
+ return nil
+ }
+ return json.Unmarshal(b, (*json.Number)(n))
+}
+
+// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS
+func setIMDSRetryOptionDefaults(o *policy.RetryOptions) {
+ if o.MaxRetries == 0 {
+ o.MaxRetries = 5
+ }
+ if o.MaxRetryDelay == 0 {
+ o.MaxRetryDelay = 1 * time.Minute
+ }
+ if o.RetryDelay == 0 {
+ o.RetryDelay = 2 * time.Second
+ }
+ if o.StatusCodes == nil {
+ o.StatusCodes = []int{
+ // IMDS docs recommend retrying 404, 429 and all 5xx
+ // https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling
+ http.StatusNotFound, // 404
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusNotImplemented, // 501
+ http.StatusBadGateway, // 502
+ http.StatusGatewayTimeout, // 504
+ http.StatusHTTPVersionNotSupported, // 505
+ http.StatusVariantAlsoNegotiates, // 506
+ http.StatusInsufficientStorage, // 507
+ http.StatusLoopDetected, // 508
+ http.StatusNotExtended, // 510
+ http.StatusNetworkAuthenticationRequired, // 511
+ }
+ }
+ if o.TryTimeout == 0 {
+ o.TryTimeout = 1 * time.Minute
+ }
+}
+
+// newManagedIdentityClient creates a new instance of the ManagedIdentityClient with the ManagedIdentityCredentialOptions
+// that are passed into it along with a default pipeline.
+// options: ManagedIdentityCredentialOptions configure policies for the pipeline and the authority host that
+// will be used to retrieve tokens and authenticate
+func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*managedIdentityClient, error) {
+ if options == nil {
+ options = &ManagedIdentityCredentialOptions{}
+ }
+ cp := options.ClientOptions
+ c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS}
+ env := "IMDS"
+ if endpoint, ok := os.LookupEnv(identityEndpoint); ok {
+ if _, ok := os.LookupEnv(identityHeader); ok {
+ if _, ok := os.LookupEnv(identityServerThumbprint); ok {
+ env = "Service Fabric"
+ c.endpoint = endpoint
+ c.msiType = msiTypeServiceFabric
+ } else {
+ env = "App Service"
+ c.endpoint = endpoint
+ c.msiType = msiTypeAppService
+ }
+ } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok {
+ env = "Azure Arc"
+ c.endpoint = endpoint
+ c.msiType = msiTypeAzureArc
+ }
+ } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok {
+ env = "Cloud Shell"
+ c.endpoint = endpoint
+ c.msiType = msiTypeCloudShell
+ } else {
+ setIMDSRetryOptionDefaults(&cp.Retry)
+ }
+ c.pipeline = runtime.NewPipeline(component, version, runtime.PipelineOptions{}, &cp)
+
+ if log.Should(EventAuthentication) {
+ log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env)
+ }
+
+ return &c, nil
+}
+
+// authenticate creates an authentication request for a Managed Identity and returns the resulting Access Token if successful.
+// ctx: The current context for controlling the request lifetime.
+// clientID: The client (application) ID of the service principal.
+// scopes: The scopes required for the token.
+func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) {
+ var cancel context.CancelFunc
+ if c.imdsTimeout > 0 && c.msiType == msiTypeIMDS {
+ ctx, cancel = context.WithTimeout(ctx, c.imdsTimeout)
+ defer cancel()
+ }
+
+ msg, err := c.createAuthRequest(ctx, id, scopes)
+ if err != nil {
+ return azcore.AccessToken{}, err
+ }
+
+ resp, err := c.pipeline.Do(msg)
+ if err != nil {
+ if cancel != nil && errors.Is(err, context.DeadlineExceeded) {
+ return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "IMDS token request timed out")
+ }
+ return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil)
+ }
+
+ // got a response, remove the IMDS timeout so future requests use the transport's configuration
+ c.imdsTimeout = 0
+
+ if runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
+ return c.createAccessToken(resp)
+ }
+
+ if c.msiType == msiTypeIMDS && resp.StatusCode == 400 {
+ if id != nil {
+ return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp)
+ }
+ return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "no default identity is assigned to this resource")
+ }
+
+ return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp)
+}
+
+func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) {
+ value := struct {
+ // these are the only fields that we use
+ Token string `json:"access_token,omitempty"`
+ RefreshToken string `json:"refresh_token,omitempty"`
+ ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid
+ ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string
+ }{}
+ if err := runtime.UnmarshalAsJSON(res, &value); err != nil {
+ return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err)
+ }
+ if value.ExpiresIn != "" {
+ expiresIn, err := json.Number(value.ExpiresIn).Int64()
+ if err != nil {
+ return azcore.AccessToken{}, err
+ }
+ return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil
+ }
+ switch v := value.ExpiresOn.(type) {
+ case float64:
+ return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil
+ case string:
+ if expiresOn, err := strconv.Atoi(v); err == nil {
+ return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil
+ }
+ return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res)
+ default:
+ msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v)
+ return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res)
+ }
+}
+
+func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
+ switch c.msiType {
+ case msiTypeIMDS:
+ return c.createIMDSAuthRequest(ctx, id, scopes)
+ case msiTypeAppService:
+ return c.createAppServiceAuthRequest(ctx, id, scopes)
+ case msiTypeAzureArc:
+		// need to perform a preliminary request to retrieve the secret key challenge provided by the HIMDS service
+ key, err := c.getAzureArcSecretKey(ctx, scopes)
+ if err != nil {
+			msg := fmt.Sprintf("failed to retrieve secret key from the identity endpoint: %v", err)
+ return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil)
+ }
+ return c.createAzureArcAuthRequest(ctx, id, scopes, key)
+ case msiTypeServiceFabric:
+ return c.createServiceFabricAuthRequest(ctx, id, scopes)
+ case msiTypeCloudShell:
+ return c.createCloudShellAuthRequest(ctx, id, scopes)
+ default:
+ return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment")
+ }
+}
+
+func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
+ request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ request.Raw().Header.Set(headerMetadata, "true")
+ q := request.Raw().URL.Query()
+ q.Add("api-version", imdsAPIVersion)
+ q.Add("resource", strings.Join(scopes, " "))
+ if id != nil {
+ if id.idKind() == miResourceID {
+ q.Add(qpResID, id.String())
+ } else {
+ q.Add(qpClientID, id.String())
+ }
+ }
+ request.Raw().URL.RawQuery = q.Encode()
+ return request, nil
+}
+
+func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
+ request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader))
+ q := request.Raw().URL.Query()
+ q.Add("api-version", "2019-08-01")
+ q.Add("resource", scopes[0])
+ if id != nil {
+ if id.idKind() == miResourceID {
+ q.Add(qpResID, id.String())
+ } else {
+ q.Add(qpClientID, id.String())
+ }
+ }
+ request.Raw().URL.RawQuery = q.Encode()
+ return request, nil
+}
+
+func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
+ request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ q := request.Raw().URL.Query()
+ request.Raw().Header.Set("Accept", "application/json")
+ request.Raw().Header.Set("Secret", os.Getenv(identityHeader))
+ q.Add("api-version", serviceFabricAPIVersion)
+ q.Add("resource", strings.Join(scopes, " "))
+ if id != nil {
+ log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime")
+ if id.idKind() == miResourceID {
+ q.Add(qpResID, id.String())
+ } else {
+ q.Add(qpClientID, id.String())
+ }
+ }
+ request.Raw().URL.RawQuery = q.Encode()
+ return request, nil
+}
+
+func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) {
+	// create the request to retrieve the secret key challenge provided by the HIMDS service
+ request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
+ if err != nil {
+ return "", err
+ }
+ request.Raw().Header.Set(headerMetadata, "true")
+ q := request.Raw().URL.Query()
+ q.Add("api-version", azureArcAPIVersion)
+ q.Add("resource", strings.Join(resources, " "))
+ request.Raw().URL.RawQuery = q.Encode()
+ // send the initial request to get the short-lived secret key
+ response, err := c.pipeline.Do(request)
+ if err != nil {
+ return "", err
+ }
+ // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location
+ // of the secret key file. Any other status code indicates an error in the request.
+ if response.StatusCode != 401 {
+ msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode)
+ return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response)
+ }
+ header := response.Header.Get("WWW-Authenticate")
+ if len(header) == 0 {
+ return "", errors.New("did not receive a value from WWW-Authenticate header")
+ }
+ // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key
+ pos := strings.LastIndex(header, "=")
+ if pos == -1 {
+ return "", fmt.Errorf("did not receive a correct value from WWW-Authenticate header: %s", header)
+ }
+ key, err := ioutil.ReadFile(header[pos+1:])
+ if err != nil {
+ return "", fmt.Errorf("could not read file (%s) contents: %v", header[pos+1:], err)
+ }
+ return string(key), nil
+}
+
+func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) {
+ request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ request.Raw().Header.Set(headerMetadata, "true")
+ request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key))
+ q := request.Raw().URL.Query()
+ q.Add("api-version", azureArcAPIVersion)
+ q.Add("resource", strings.Join(resources, " "))
+ if id != nil {
+ log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities")
+ if id.idKind() == miResourceID {
+ q.Add(qpResID, id.String())
+ } else {
+ q.Add(qpClientID, id.String())
+ }
+ }
+ request.Raw().URL.RawQuery = q.Encode()
+ return request, nil
+}
+
+func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
+ request, err := runtime.NewRequest(ctx, http.MethodPost, c.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ request.Raw().Header.Set(headerMetadata, "true")
+ data := url.Values{}
+ data.Set("resource", strings.Join(scopes, " "))
+ dataEncoded := data.Encode()
+ body := streaming.NopCloser(strings.NewReader(dataEncoded))
+ if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil {
+ return nil, err
+ }
+ if id != nil {
+ log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities")
+ q := request.Raw().URL.Query()
+ if id.idKind() == miResourceID {
+ q.Add(qpResID, id.String())
+ } else {
+ q.Add(qpClientID, id.String())
+ }
+ }
+ return request, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
new file mode 100644
index 00000000000..f17ada1c3ed
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
@@ -0,0 +1,105 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+)
+
+const credNameManagedIdentity = "ManagedIdentityCredential"
+
+type managedIdentityIDKind int
+
+const (
+ miClientID managedIdentityIDKind = 0
+ miResourceID managedIdentityIDKind = 1
+)
+
+// ManagedIDKind identifies the ID of a managed identity as either a client or resource ID
+type ManagedIDKind interface {
+ fmt.Stringer
+ idKind() managedIdentityIDKind
+}
+
+// ClientID is the client ID of a user-assigned managed identity.
+type ClientID string
+
+func (ClientID) idKind() managedIdentityIDKind {
+ return miClientID
+}
+
+// String returns the string value of the ID.
+func (c ClientID) String() string {
+ return string(c)
+}
+
+// ResourceID is the resource ID of a user-assigned managed identity.
+type ResourceID string
+
+func (ResourceID) idKind() managedIdentityIDKind {
+ return miResourceID
+}
+
+// String returns the string value of the ID.
+func (r ResourceID) String() string {
+ return string(r)
+}
+
+// ManagedIdentityCredentialOptions contains optional parameters for ManagedIdentityCredential.
+type ManagedIdentityCredentialOptions struct {
+ azcore.ClientOptions
+
+ // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity
+ // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that
+ // some platforms don't accept resource IDs.
+ ID ManagedIDKind
+}
+
+// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities.
+// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a
+// user-assigned identity. See Azure Active Directory documentation for more information about managed identities:
+// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview
+type ManagedIdentityCredential struct {
+ id ManagedIDKind
+ client *managedIdentityClient
+}
+
+// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options.
+func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*ManagedIdentityCredential, error) {
+ if options == nil {
+ options = &ManagedIdentityCredentialOptions{}
+ }
+ client, err := newManagedIdentityClient(options)
+ if err != nil {
+ return nil, err
+ }
+ return &ManagedIdentityCredential{id: options.ID, client: client}, nil
+}
+
+// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients.
+func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ if len(opts.Scopes) != 1 {
+ err := errors.New(credNameManagedIdentity + ": GetToken() requires exactly one scope")
+ return azcore.AccessToken{}, err
+ }
+ // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here
+ scopes := []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)}
+ tk, err := c.client.authenticate(ctx, c.id, scopes)
+ if err != nil {
+ return azcore.AccessToken{}, err
+ }
+ logGetTokenSuccess(c, opts)
+ return tk, err
+}
+
+var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
new file mode 100644
index 00000000000..8b02e7b47ba
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
@@ -0,0 +1,79 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+ "context"
+ "errors"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
+)
+
+const credNameUserPassword = "UsernamePasswordCredential"
+
+// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential.
+type UsernamePasswordCredentialOptions struct {
+ azcore.ClientOptions
+}
+
+// UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication,
+// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible
+// with any form of multi-factor authentication, and the application must already have user or admin consent.
+// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts.
+type UsernamePasswordCredential struct {
+ client publicClient
+ username string
+ password string
+ account public.Account
+}
+
+// NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user
+// will authenticate to. Pass nil for options to accept defaults.
+func NewUsernamePasswordCredential(tenantID string, clientID string, username string, password string, options *UsernamePasswordCredentialOptions) (*UsernamePasswordCredential, error) {
+ if !validTenantID(tenantID) {
+ return nil, errors.New(tenantIDValidationErr)
+ }
+ if options == nil {
+ options = &UsernamePasswordCredentialOptions{}
+ }
+ authorityHost, err := setAuthorityHost(options.Cloud)
+ if err != nil {
+ return nil, err
+ }
+ c, err := public.New(clientID,
+ public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)),
+ public.WithHTTPClient(newPipelineAdapter(&options.ClientOptions)),
+ )
+ if err != nil {
+ return nil, err
+ }
+ return &UsernamePasswordCredential{username: username, password: password, client: c}, nil
+}
+
+// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients.
+func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+ if len(opts.Scopes) == 0 {
+ return azcore.AccessToken{}, errors.New(credNameUserPassword + ": GetToken() requires at least one scope")
+ }
+ ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, public.WithSilentAccount(c.account))
+ if err == nil {
+ logGetTokenSuccess(c, opts)
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+ }
+ ar, err = c.client.AcquireTokenByUsernamePassword(ctx, opts.Scopes, c.username, c.password)
+ if err != nil {
+ return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSALError(credNameUserPassword, err)
+ }
+ c.account = ar.Account
+ logGetTokenSuccess(c, opts)
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+}
+
+var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
new file mode 100644
index 00000000000..0fb125ace9e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
@@ -0,0 +1,15 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+const (
+ // UserAgent is the string to be used in the user agent string when making requests.
+ component = "azidentity"
+
+ // Version is the semantic version (see http://semver.org) of this module.
+ version = "v1.1.0"
+)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go
index aa1ffa5834a..245af7d2bec 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -15,7 +15,7 @@ import (
// Caller returns the file and line number of a frame on the caller's stack.
// If the funtion fails an empty string is returned.
// skipFrames - the number of frames to skip when determining the caller.
-// Passing a value of 0 will return the immediate caller of this function.
+// Passing a value of 0 will return the immediate caller of this function.
func Caller(skipFrames int) string {
if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok {
// the skipFrames + 1 is to skip ourselves
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go
index 60d31b222f3..66bf13e5f04 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go
index 32d35b020e1..8c6eacb618a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
index 9ba0a83f1da..ade7b348e30 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go
index 9ea5ba79ae8..d7876d297ae 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
index a4e94a9def8..4f1dcf1b78a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -12,32 +12,13 @@ import (
"time"
)
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
// Event is used to group entries. Each group can be toggled on or off.
type Event string
-const (
- // EventRequest entries contain information about HTTP requests.
- // This includes information like the URL, query parameters, and headers.
- EventRequest Event = "Request"
-
- // EventResponse entries containe information about HTTP responses.
- // This includes information like the HTTP status code, headers, and request URL.
- EventResponse Event = "Response"
-
- // EventRetryPolicy entries contain information specific to the rety policy in use.
- EventRetryPolicy Event = "Retry"
-
- // EventLRO entries contian information specific to long-running operations.
- // This includes information like polling location, operation state, and sleep intervals.
- EventLRO Event = "LongRunningOperation"
-)
-
-// logger controls which events to log and writing to the underlying log.
-type logger struct {
- cls []Event
- lst func(Event, string)
-}
-
// SetEvents is used to control which events are written to
// the log. By default all log events are writen.
func SetEvents(cls ...Event) {
@@ -49,6 +30,10 @@ func SetListener(lst func(Event, string)) {
log.lst = lst
}
+///////////////////////////////////////////////////////////////////////////////////////////////////
+// END PUBLIC SURFACE AREA
+///////////////////////////////////////////////////////////////////////////////////////////////////
+
// Should returns true if the specified log event should be written to the log.
// By default all log events will be logged. Call SetEvents() to limit
// the log events for logging.
@@ -88,11 +73,17 @@ func Writef(cls Event, format string, a ...interface{}) {
log.lst(cls, fmt.Sprintf(format, a...))
}
-// TestResetEvents is used for testing purposes only.
+// TestResetEvents is used for TESTING PURPOSES ONLY.
func TestResetEvents() {
log.cls = nil
}
+// logger controls which events to log and writing to the underlying log.
+type logger struct {
+ cls []Event
+ lst func(Event, string)
+}
+
// the process-wide logger
var log logger
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/expiring_resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
similarity index 74%
rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/expiring_resource.go
rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
index 1c285e97df0..b23f3860c5e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/expiring_resource.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
@@ -1,21 +1,21 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
-package shared
+package temporal
import (
"sync"
"time"
)
-// AcquireResource abstracts a method for refreshing an expiring resource.
-type AcquireResource func(state interface{}) (newResource interface{}, newExpiration time.Time, err error)
+// AcquireResource abstracts a method for refreshing a temporal resource.
+type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error)
-// ExpiringResource is a temporal resource (usually a credential) that requires periodic refreshing.
-type ExpiringResource struct {
+// Resource is a temporal resource (usually a credential) that requires periodic refreshing.
+type Resource[TResource, TState any] struct {
// cond is used to synchronize access to the shared resource embodied by the remaining fields
cond *sync.Cond
@@ -23,7 +23,7 @@ type ExpiringResource struct {
acquiring bool
// resource contains the value of the shared resource
- resource interface{}
+ resource TResource
// expiration indicates when the shared resource expires; it is 0 if the resource was never acquired
expiration time.Time
@@ -32,17 +32,17 @@ type ExpiringResource struct {
lastAttempt time.Time
// acquireResource is the callback function that actually acquires the resource
- acquireResource AcquireResource
+ acquireResource AcquireResource[TResource, TState]
}
-// NewExpiringResource creates a new ExpiringResource that uses the specified AcquireResource for refreshing.
-func NewExpiringResource(ar AcquireResource) *ExpiringResource {
- return &ExpiringResource{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar}
+// NewResource creates a new Resource that uses the specified AcquireResource for refreshing.
+func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] {
+ return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar}
}
-// GetResource returns the underlying resource.
+// Get returns the underlying resource.
// If the resource is fresh, no refresh is performed.
-func (er *ExpiringResource) GetResource(state interface{}) (interface{}, error) {
+func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) {
// If the resource is expiring within this time window, update it eagerly.
// This allows other threads/goroutines to keep running by using the not-yet-expired
// resource value while one thread/goroutine updates the resource.
@@ -87,7 +87,7 @@ func (er *ExpiringResource) GetResource(state interface{}) (interface{}, error)
if acquire {
// This thread/goroutine has been selected to acquire/update the resource
var expiration time.Time
- var newValue interface{}
+ var newValue TResource
er.lastAttempt = now
newValue, expiration, err = er.acquireResource(state)
@@ -109,3 +109,12 @@ func (er *ExpiringResource) GetResource(state interface{}) (interface{}, error)
}
return resource, err // Return the resource this thread/goroutine can use
}
+
+// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get().
+func (er *Resource[TResource, TState]) Expire() {
+ er.cond.L.Lock()
+ defer er.cond.L.Unlock()
+
+ // Reset the expiration as if we never got this resource to begin with
+ er.expiration = time.Time{}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go
index a9b0b5313a1..a3824bee8b5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go
index 3670361c9e8..278ac9cd1c2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
index f828a177b64..db095b3a26a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
@@ -1,12 +1,37 @@
# Release History
+## 0.4.1 (2022-05-12)
+
+### Other Changes
+* Updated to latest `azcore` and `internal` modules
+
+## 0.4.0 (2022-04-19)
+
+### Breaking Changes
+* Fixed Issue #17150 : Renaming/refactoring high level methods.
+* Fixed Issue #16972 : Constructors should return clients by reference.
+* Renaming the options bags to match the naming convention same as that of response. The behaviour of options bags remains the same.
+
+### Bugs Fixed
+* Fixed Issue #17515 : SetTags options bag missing leaseID.
+* Fixed Issue #17423 : Drop "Type" suffix from `GeoReplicationStatusType`.
+* Fixed Issue #17335 : Nil pointer exception when passing nil options bag in `ListBlobsFlat` API call.
+* Fixed Issue #17188 : `BlobURLParts` not supporting VersionID
+* Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods ignoring the options bag.
+* Fixed Issue #16920 : Fixing error handling example.
+* Fixed Issue #16786 : Refactoring of autorest code generation definition and adding necessary transformations.
+* Fixed Issue #16679 : Response parsing issue in List blobs API.
+
## 0.3.0 (2022-02-09)
### Breaking Changes
-* Updated to latest `azcore`. Public surface area is unchanged.
-* [#16978](https://github.com/Azure/azure-sdk-for-go/pull/16978): The `DownloadResponse.Body` parameter is now `*RetryReaderOptions`.
+
+* Updated to latest `azcore`. Public surface area is unchanged.
+* [#16978](https://github.com/Azure/azure-sdk-for-go/pull/16978): The `DownloadResponse.Body` parameter is
+ now `*RetryReaderOptions`.
### Bugs Fixed
+
* Fixed Issue #16193 : `azblob.GetSASToken` wrong signed resource.
* Fixed Issue #16223 : `HttpRange` does not expose its fields.
* Fixed Issue #16254 : Issue passing reader to upload `BlockBlobClient`
@@ -19,9 +44,11 @@
## 0.2.0 (2021-11-03)
### Breaking Changes
+
* Clients now have one constructor per authentication method
## 0.1.0 (2021-09-13)
### Features Added
+
* This is the initial preview release of the `azblob` library
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md
index 5aa62bce398..32a10a005c1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md
@@ -2,8 +2,10 @@
## Introduction
-The Microsoft Azure Storage SDK for Go allows you to build applications that takes advantage of Azure's scalable cloud storage.
-This is the new beta client module for Azure Blob Storage, which follows our [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html) and replaces the previous beta [azblob package](https://github.com/azure/azure-storage-blob-go).
+The Microsoft Azure Storage SDK for Go allows you to build applications that takes advantage of Azure's scalable cloud
+storage. This is the new beta client module for Azure Blob Storage, which follows
+our [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html) and replaces the
+previous beta [azblob package](https://github.com/azure/azure-storage-blob-go).
## Getting Started
@@ -11,71 +13,77 @@ The Azure Blob SDK can access an Azure Storage account.
### Prerequisites
-* Go versions 1.16 or higher
-* You must have an [Azure storage account][azure_storage_account]. If you need to create one, you can use the [Azure Cloud Shell](https://shell.azure.com/bash) to create one with these commands (replace `my-resource-group` and `mystorageaccount` with your own unique names):
- (Optional) if you want a new resource group to hold the Storage Account:
- ```
- az group create --name my-resource-group --location westus2
- ```
- Create the storage account:
- ```
- az storage account create --resource-group my-resource-group --name mystorageaccount
- ```
-
- The storage account name can be queried with:
- ```
- az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob"
- ```
- You can set this as an environment variable with:
- ```bash
- # PowerShell
- $ENV:AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount"
- # bash
- export AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount"
- ```
-
- Query your storage account keys:
- ```
- az storage account keys list --resource-group my-resource-group -n mystorageaccount
- ```
-
- Output:
- ```json
- [
- {
- "creationTime": "2022-02-07T17:18:44.088870+00:00",
- "keyName": "key1",
- "permissions": "FULL",
- "value": "..."
- },
- {
- "creationTime": "2022-02-07T17:18:44.088870+00:00",
- "keyName": "key2",
- "permissions": "FULL",
- "value": "..."
- }
- ]
- ```
-
- ```bash
- # PowerShell
- $ENV:AZURE_STORAGE_ACCOUNT_KEY=""
- # Bash
- export AZURE_STORAGE_ACCOUNT_KEY=""
- ```
- > You can obtain your account key from the Azure Portal under the "Access Keys" section on the left-hand pane of your storage account.
+* Go versions 1.18 or higher
+* You must have an [Azure storage account][azure_storage_account]. If you need to create one, you can use
+ the [Azure Cloud Shell](https://shell.azure.com/bash) to create one with these commands (replace `my-resource-group`
+ and `mystorageaccount` with your own unique names):
+ (Optional) if you want a new resource group to hold the Storage Account:
+ ```
+ az group create --name my-resource-group --location westus2
+ ```
+ Create the storage account:
+ ```
+ az storage account create --resource-group my-resource-group --name mystorageaccount
+ ```
+
+ The storage account name can be queried with:
+ ```
+ az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob"
+ ```
+ You can set this as an environment variable with:
+ ```bash
+ # PowerShell
+ $ENV:AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount"
+ # bash
+ export AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount"
+ ```
+
+ Query your storage account keys:
+ ```
+ az storage account keys list --resource-group my-resource-group -n mystorageaccount
+ ```
+
+ Output:
+ ```json
+ [
+ {
+ "creationTime": "2022-02-07T17:18:44.088870+00:00",
+ "keyName": "key1",
+ "permissions": "FULL",
+ "value": "..."
+ },
+ {
+ "creationTime": "2022-02-07T17:18:44.088870+00:00",
+ "keyName": "key2",
+ "permissions": "FULL",
+ "value": "..."
+ }
+ ]
+ ```
+
+ ```bash
+ # PowerShell
+ $ENV:AZURE_STORAGE_ACCOUNT_KEY=""
+ # Bash
+ export AZURE_STORAGE_ACCOUNT_KEY=""
+ ```
+ > You can obtain your account key from the Azure Portal under the "Access Keys" section on the left-hand pane of your storage account.
#### Create account
-* To create a new Storage account, you can use [Azure Portal][azure_portal_create_account], [Azure PowerShell][azure_powershell_create_account], or [Azure CLI][azure_cli_create_account].
+* To create a new Storage account, you can use [Azure Portal][azure_portal_create_account]
+ , [Azure PowerShell][azure_powershell_create_account], or [Azure CLI][azure_cli_create_account].
### Install the package
+
* Install the Azure Blob Storage client module for Go with `go get`:
+
```bash
go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
```
> Optional: If you are going to use AAD authentication, install the `azidentity` package:
+
```bash
go get github.com/Azure/azure-sdk-for-go/sdk/azidentity
```
@@ -86,11 +94,13 @@ go get github.com/Azure/azure-sdk-for-go/sdk/azidentity
* [Azure storage accounts][azure_storage_account].
* [Containers](https://azure.microsoft.com/en-in/overview/what-is-a-container/#overview) within those storage accounts.
-* [Blobs](https://azure.microsoft.com/en-in/services/storage/blobs/#overview) (block blobs/ page blobs/ append blobs) within those containers.
+* [Blobs](https://azure.microsoft.com/en-in/services/storage/blobs/#overview) (block blobs/ page blobs/ append blobs)
+ within those containers.
-Interaction with these resources starts with an instance of a [client](#clients).
-To create a client object, you will need the account's blob service endpoint URL and a credential that allows you to access the account.
-The `endpoint` can be found on the page for your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys" section or by running the following Azure CLI command:
+Interaction with these resources starts with an instance of a [client](#clients). To create a client object, you will
+need the account's blob service endpoint URL and a credential that allows you to access the account. The `endpoint` can
+be found on the page for your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys"
+section or by running the following Azure CLI command:
```bash
# Get the blob service URL for the account
@@ -98,6 +108,7 @@ az storage account show -n mystorageaccount -g my-resource-group --query "primar
```
Once you have the account URL, it can be used to create the service client:
+
```golang
cred, err := azblob.NewSharedKeyCredential("myAccountName", "myAccountKey")
handle(err)
@@ -105,12 +116,13 @@ serviceClient, err := azblob.NewServiceClientWithSharedKey("https://.blob.core.windows.net/
- service, err := NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil)
- handle(err)
+accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
+if !ok {
+ handle(errors.New("AZURE_STORAGE_ACCOUNT_KEY could not be found"))
+}
+cred, err := NewSharedKeyCredential(accountName, accountKey)
+handle(err)
- // All operations in the Azure Blob Storage SDK for Go operate on a context.Context, allowing you to control cancellation/timeout.
- ctx := context.Background() // This example has no expiry.
+// Open up a service client.
+// You'll need to specify a service URL, which for blob endpoints usually makes up the syntax http(s)://.blob.core.windows.net/
+service, err := NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil)
+handle(err)
- // This example showcases several common operations to help you get started, such as:
+// All operations in the Azure Blob Storage SDK for Go operate on a context.Context, allowing you to control cancellation/timeout.
+ctx := context.Background() // This example has no expiry.
- // ===== 1. Creating a container =====
+// This example showcases several common operations to help you get started, such as:
- // First, branch off of the service client and create a container client.
- container := service.NewContainerClient("mycontainer")
- // Then, fire off a create operation on the container client.
- // Note that, all service-side requests have an options bag attached, allowing you to specify things like metadata, public access types, etc.
- // Specifying nil omits all options.
- _, err = container.Create(ctx, nil)
- handle(err)
+// ===== 1. Creating a container =====
- // ===== 2. Uploading/downloading a block blob =====
- // We'll specify our data up-front, rather than reading a file for simplicity's sake.
- data := "Hello world!"
+// First, branch off of the service client and create a container client.
+container := service.NewContainerClient("mycontainer")
- // Branch off of the container into a block blob client
- blockBlob := container.NewBlockBlobClient("HelloWorld.txt")
+// Then, fire off a create operation on the container client.
+// Note that, all service-side requests have an options bag attached, allowing you to specify things like metadata, public access types, etc.
+// Specifying nil omits all options.
+_, err = container.Create(ctx, nil)
+handle(err)
- // Upload data to the block blob
- _, err = blockBlob.Upload(ctx, NopCloser(strings.NewReader(data)), nil)
- handle(err)
+// ===== 2. Uploading/downloading a block blob =====
+// We'll specify our data up-front, rather than reading a file for simplicity's sake.
+data := "Hello world!"
- // Download the blob's contents and ensure that the download worked properly
- get, err := blockBlob.Download(ctx, nil)
- handle(err)
+// Branch off of the container into a block blob client
+blockBlob := container.NewBlockBlobClient("HelloWorld.txt")
- // Open a buffer, reader, and then download!
- downloadedData := &bytes.Buffer{}
- reader := get.Body(RetryReaderOptions{}) // RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here.
- _, err = downloadedData.ReadFrom(reader)
- handle(err)
- err = reader.Close()
+// Upload data to the block blob
+_, err = blockBlob.Upload(ctx, NopCloser(strings.NewReader(data)), nil)
+handle(err)
+
+// Download the blob's contents and ensure that the download worked properly
+get, err := blockBlob.Download(ctx, nil)
+handle(err)
+
+// Open a buffer, reader, and then download!
+downloadedData := &bytes.Buffer{}
+// RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here.
+reader := get.Body(RetryReaderOptions{})
+_, err = downloadedData.ReadFrom(reader)
+handle(err)
+err = reader.Close()
+handle(err)
+if data != downloadedData.String() {
+ handle(errors.New("downloaded data doesn't match uploaded data"))
+}
+
+// ===== 3. list blobs =====
+// The ListBlobs and ListContainers APIs return two channels, a values channel, and an errors channel.
+// You should enumerate on a range over the values channel, and then check the errors channel, as only ONE value will ever be passed to the errors channel.
+// The AutoPagerTimeout defines how long it will wait to place into the items channel before it exits & cleans itself up. A zero time will result in no timeout.
+pager := container.ListBlobsFlat(nil)
+
+for pager.NextPage(ctx) {
+ resp := pager.PageResponse()
+
+ for _, v := range resp.ContainerListBlobFlatSegmentResult.Segment.BlobItems {
+ fmt.Println(*v.Name)
+ }
+}
+
+if err = pager.Err(); err != nil {
handle(err)
- if data != downloadedData.String() {
- handle(errors.New("downloaded data doesn't match uploaded data"))
- }
-
- // ===== 3. list blobs =====
- // The ListBlobs and ListContainers APIs return two channels, a values channel, and an errors channel.
- // You should enumerate on a range over the values channel, and then check the errors channel, as only ONE value will ever be passed to the errors channel.
- // The AutoPagerTimeout defines how long it will wait to place into the items channel before it exits & cleans itself up. A zero time will result in no timeout.
- pager := container.ListBlobsFlat(nil)
-
- for pager.NextPage(ctx) {
- resp := pager.PageResponse()
-
- for _, v := range resp.ContainerListBlobFlatSegmentResult.Segment.BlobItems {
- fmt.Println(*v.Name)
- }
- }
-
- if err = pager.Err(); err != nil {
- handle(err)
- }
-
- // Delete the blob we created earlier.
- _, err = blockBlob.Delete(ctx, nil)
- handle(err)
-
- // Delete the container we created earlier.
- _, err = container.Delete(ctx, nil)
- handle(err)
+}
+
+// Delete the blob we created earlier.
+_, err = blockBlob.Delete(ctx, nil)
+handle(err)
+
+// Delete the container we created earlier.
+_, err = container.Delete(ctx, nil)
+handle(err)
```
## Troubleshooting
@@ -284,12 +302,10 @@ All I/O operations will return an `error` that can be investigated to discover m
addition, you can investigate the raw response of any response object:
```golang
-var errResp azcore.HTTPResponse
+var storageErr *azblob.StorageError
resp, err := serviceClient.CreateContainer(context.Background(), "testcontainername", nil)
-if err != nil {
- if errors.As(err, &errResp) {
- // do something with errResp.RawResponse()
- }
+if err != nil && errors.As(err, &storageErr) {
+ // do something with storageErr.Response()
}
```
@@ -308,8 +324,8 @@ be like the following:
```golang
import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
// Set log to output to the console
-azlog.SetListener(func(cls azlog.Classification, msg string) {
- fmt.Println(msg) // printing log out to the console
+azlog.SetListener(func (cls azlog.Classification, msg string) {
+ fmt.Println(msg) // printing log out to the console
})
// Includes only requests and responses in credential logs
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md
new file mode 100644
index 00000000000..0a391904aac
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md
@@ -0,0 +1,171 @@
+# Code Generation - Azure Blob SDK for Golang
+
+
+
+```bash
+cd swagger
+autorest autorest.md
+gofmt -w generated/*
+```
+
+### Settings
+
+```yaml
+go: true
+clear-output-folder: false
+version: "^3.0.0"
+license-header: MICROSOFT_MIT_NO_VERSION
+input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json"
+module: "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+credential-scope: "https://storage.azure.com/.default"
+output-folder: internal/
+file-prefix: "zz_generated_"
+openapi-type: "data-plane"
+verbose: true
+security: AzureKey
+module-version: "0.3.0"
+modelerfour:
+ group-parameters: false
+ seal-single-value-enum-by-default: true
+ lenient-model-deduplication: true
+export-clients: false
+use: "@autorest/go@4.0.0-preview.36"
+```
+
+### Fix BlobMetadata.
+
+``` yaml
+directive:
+- from: swagger-document
+ where: $.definitions
+ transform: >
+ delete $.BlobMetadata["properties"];
+
+```
+
+### Don't include container name or blob in path - we have direct URIs.
+
+``` yaml
+directive:
+- from: swagger-document
+ where: $["x-ms-paths"]
+ transform: >
+ for (const property in $)
+ {
+ if (property.includes('/{containerName}/{blob}'))
+ {
+ $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))});
+ }
+ else if (property.includes('/{containerName}'))
+ {
+ $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))});
+ }
+ }
+```
+
+### Remove DataLake stuff.
+
+``` yaml
+directive:
+- from: swagger-document
+ where: $["x-ms-paths"]
+ transform: >
+ for (const property in $)
+ {
+ if (property.includes('filesystem'))
+ {
+ delete $[property];
+ }
+ }
+```
+
+### Remove DataLakeStorageError
+
+``` yaml
+directive:
+- from: swagger-document
+ where: $.definitions
+ transform: >
+ delete $.DataLakeStorageError;
+```
+
+### Fix 304s
+
+``` yaml
+directive:
+- from: swagger-document
+ where: $["x-ms-paths"]["/{containerName}/{blob}"]
+ transform: >
+ $.get.responses["304"] = {
+ "description": "The condition specified using HTTP conditional header(s) is not met.",
+ "x-az-response-name": "ConditionNotMetError",
+ "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }
+ };
+```
+
+### Fix GeoReplication
+
+``` yaml
+directive:
+- from: swagger-document
+ where: $.definitions
+ transform: >
+ delete $.GeoReplication.properties.Status["x-ms-enum"];
+ $.GeoReplication.properties.Status["x-ms-enum"] = {
+ "name": "BlobGeoReplicationStatus",
+ "modelAsString": false
+ };
+```
+
+### Fix RehydratePriority
+
+``` yaml
+directive:
+- from: swagger-document
+ where: $.definitions
+ transform: >
+ delete $.RehydratePriority["x-ms-enum"];
+ $.RehydratePriority["x-ms-enum"] = {
+ "name": "RehydratePriority",
+ "modelAsString": false
+ };
+```
+
+### Fix BlobDeleteType
+
+``` yaml
+directive:
+- from: swagger-document
+ where: $.parameters
+ transform: >
+ delete $.BlobDeleteType.enum;
+ $.BlobDeleteType.enum = [
+ "None",
+ "Permanent"
+ ];
+```
+
+### Fix EncryptionAlgorithm
+
+``` yaml
+directive:
+- from: swagger-document
+ where: $.parameters
+ transform: >
+ delete $.EncryptionAlgorithm.enum;
+ $.EncryptionAlgorithm.enum = [
+ "None",
+ "AES256"
+ ];
+```
+
+### Fix XML string "ObjectReplicationMetadata" to "OrMetadata"
+
+``` yaml
+directive:
+- from: swagger-document
+ where: $.definitions
+ transform: >
+ $.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"];
+ delete $.BlobItemInternal.properties["ObjectReplicationMetadata"];
+```
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go
index f2ff4f7193c..14c7feda110 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go
index 5f4fc4d8957..d5ccdfb4076 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -21,8 +24,8 @@ import (
// blockWriter provides methods to upload blocks that represent a file to a server and commit them.
// This allows us to provide a local implementation that fakes the server for hermetic testing.
type blockWriter interface {
- StageBlock(context.Context, string, io.ReadSeekCloser, *StageBlockOptions) (BlockBlobStageBlockResponse, error)
- CommitBlockList(context.Context, []string, *CommitBlockListOptions) (BlockBlobCommitBlockListResponse, error)
+ StageBlock(context.Context, string, io.ReadSeekCloser, *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error)
+ CommitBlockList(context.Context, []string, *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error)
}
// copyFromReader copies a source io.Reader to blob storage using concurrent uploads.
@@ -33,7 +36,7 @@ type blockWriter interface {
// well, 4 MiB or 8 MiB, and auto-scale to as many goroutines within the memory limit. This gives a single dial to tweak and we can
// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model).
// We can even provide a utility to dial this number in for customer networks to optimize their copies.
-func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamToBlockBlobOptions) (BlockBlobCommitBlockListResponse, error) {
+func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) {
if err := o.defaults(); err != nil {
return BlockBlobCommitBlockListResponse{}, err
}
@@ -90,7 +93,7 @@ type copier struct {
to blockWriter
// o contains our options for uploading.
- o UploadStreamToBlockBlobOptions
+ o UploadStreamOptions
// id provides the ids for each chunk.
id *id
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml
index 29a16aba8a1..e0623f50e85 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml
@@ -25,3 +25,4 @@ stages:
- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
parameters:
ServiceDirectory: 'storage/azblob'
+ RunLiveTests: true
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go
new file mode 100644
index 00000000000..c5d501c6610
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go
@@ -0,0 +1,39 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+package azblob
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+)
+
+type connection struct {
+ u string
+ p runtime.Pipeline
+}
+
+// newConnection creates an instance of the connection type with the specified endpoint.
+// Pass nil to accept the default options; this is the same as passing a zero-value options.
+func newConnection(endpoint string, options *azcore.ClientOptions) *connection {
+ cp := azcore.ClientOptions{}
+ if options != nil {
+ cp = *options
+ }
+ return &connection{u: endpoint, p: runtime.NewPipeline(moduleName, moduleVersion, runtime.PipelineOptions{}, &cp)}
+}
+
+// Endpoint returns the connection's endpoint.
+func (c *connection) Endpoint() string {
+ return c.u
+}
+
+// Pipeline returns the connection's pipeline.
+func (c *connection) Pipeline() runtime.Pipeline {
+ return c.p
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go
index e0945925bfb..c1c336ed466 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go
@@ -1,35 +1,46 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
+var SASVersion = "2019-12-12"
+
//nolint
const (
- SASVersion = "2019-12-12"
-
- headerAuthorization = "Authorization"
- headerXmsDate = "x-ms-date"
- headerContentLength = "Content-Length"
- headerContentEncoding = "Content-Encoding"
- headerContentLanguage = "Content-Language"
- headerContentType = "Content-Type"
- headerContentMD5 = "Content-MD5"
- headerIfModifiedSince = "If-Modified-Since"
- headerIfMatch = "If-Match"
- headerIfNoneMatch = "If-None-Match"
- headerIfUnmodifiedSince = "If-Unmodified-Since"
- headerRange = "Range"
- headerDate = "Date"
- headerXmsVersion = "x-ms-version"
- headerAcceptCharset = "Accept-Charset"
- headerDataServiceVersion = "DataServiceVersion"
- headerMaxDataServiceVersion = "MaxDataServiceVersion"
- headerContentTransferEncoding = "Content-Transfer-Encoding"
-
- etagOData = "odata.etag"
- rfc3339 = "2006-01-02T15:04:05.9999999Z"
- timestamp = "Timestamp"
- etag = "ETag"
+ // BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
+ BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
+
+ // BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
+ BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB
+ // BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
+ BlockBlobMaxBlocks = 50000
+
+ // PageBlobPageBytes indicates the number of bytes in a page (512).
+ PageBlobPageBytes = 512
+
+ // BlobDefaultDownloadBlockSize is default block size
+ BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
+)
+
+const (
+ headerAuthorization = "Authorization"
+ headerXmsDate = "x-ms-date"
+ headerContentLength = "Content-Length"
+ headerContentEncoding = "Content-Encoding"
+ headerContentLanguage = "Content-Language"
+ headerContentType = "Content-Type"
+ headerContentMD5 = "Content-MD5"
+ headerIfModifiedSince = "If-Modified-Since"
+ headerIfMatch = "If-Match"
+ headerIfNoneMatch = "If-None-Match"
+ headerIfUnmodifiedSince = "If-Unmodified-Since"
+ headerRange = "Range"
+)
+
+const (
tokenScope = "https://storage.azure.com/.default"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go
index 7f508001027..c2426eb7005 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT
@@ -46,12 +46,17 @@ Use the key as the credential parameter to authenticate the client:
if !ok {
panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
}
- credential, err := NewSharedKeyCredential(accountName, accountKey)
+
+ serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
+
+ cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
handle(err)
- serviceClient, err := azblob.NewServiceClient("https://.blob.core.windows.net/", cred, nil)
+ serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil)
handle(err)
+ fmt.Println(serviceClient.URL())
+
Using a Connection String
Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately.
@@ -74,28 +79,27 @@ You can generate a SAS token from the Azure Portal under Shared Access Signature
if !ok {
panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
}
- credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
- handle(err)
+ serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
- serviceClient, err := azblob.NewServiceClient(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), credential, nil)
+ cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
handle(err)
-
- // Provide the convenience function with relevant info
- accountSAS, err := serviceClient.GetSASToken(AccountSASResourceTypes{Object: true, Service: true, Container: true}, AccountSASPermissions{Read: true, List: true}, AccountSASServices{Blob: true}, time.Now(), time.Now().Add(48*time.Hour))
+ serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil)
handle(err)
+ fmt.Println(serviceClient.URL())
- urlToSend := fmt.Sprintf("https://%s.blob.core.windows.net/?%s", accountName, accountSAS)
- // You can hand off this URL to someone else via any mechanism you choose.
+ // Alternatively, you can create SAS on the fly
- // ******************************************
+ resources := azblob.AccountSASResourceTypes{Service: true}
+ permission := azblob.AccountSASPermissions{Read: true}
+ start := time.Now()
+ expiry := start.AddDate(0, 0, 1)
+ serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, start, expiry)
+ handle(err)
- // When someone receives the URL, they can access the resource using it in code like this, or a tool of some variety.
- serviceClient, err = azblob.NewServiceClient(urlToSend, azcore.NewAnonymousCredential(), nil)
+ serviceClientWithSAS, err := azblob.NewServiceClientWithNoCredential(serviceURLWithSAS, nil)
handle(err)
- // You can also break a blob URL up into its constituent parts
- blobURLParts := azblob.NewBlobURLParts(serviceClient.URL())
- fmt.Printf("SAS expiry time = %s\n", blobURLParts.SAS.ExpiryTime())
+ fmt.Println(serviceClientWithSAS.URL())
Types of Clients
@@ -118,76 +122,78 @@ There are three different clients provided to interact with the various componen
Examples
- // Use your storage account's name and key to create a credential object, used to access your account.
- // You can obtain these details from the Azure Portal.
+ // Your account name and key can be obtained from the Azure Portal.
accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME")
if !ok {
- handle(errors.New("AZURE_STORAGE_ACCOUNT_NAME could not be found"))
+ panic("AZURE_STORAGE_ACCOUNT_NAME could not be found")
}
accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
if !ok {
- handle(errors.New("AZURE_STORAGE_ACCOUNT_KEY could not be found"))
+ panic("AZURE_STORAGE_ACCOUNT_KEY could not be found")
}
- cred, err := NewSharedKeyCredential(accountName, accountKey)
+ cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
handle(err)
- // Open up a service client.
- // You'll need to specify a service URL, which for blob endpoints usually makes up the syntax http(s)://.blob.core.windows.net/
- service, err := NewServiceClient(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil)
+ // The service URL for blob endpoints is usually in the form: http(s)://.blob.core.windows.net/
+ serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil)
handle(err)
- // All operations in the Azure Blob Storage SDK for Go operate on a context.Context, allowing you to control cancellation/timeout.
- ctx := context.Background() // This example has no expiry.
-
- // This example showcases several common operations to help you get started, such as:
+ // ===== 1. Create a container =====
- // ===== 1. Creating a container =====
+ // First, create a container client, and use the Create method to create a new container in your account
+ containerClient, err := serviceClient.NewContainerClient("testcontainer")
+ handle(err)
- // First, branch off of the service client and create a container client.
- container := service.NewContainerClient("mycontainer")
- // Then, fire off a create operation on the container client.
- // Note that, all service-side requests have an options bag attached, allowing you to specify things like metadata, public access types, etc.
- // Specifying nil omits all options.
- _, err = container.Create(ctx, nil)
+ // All APIs have an options' bag struct as a parameter.
+ // The options' bag struct allows you to specify optional parameters such as metadata, public access types, etc.
+ // If you want to use the default options, pass in nil.
+ _, err = containerClient.Create(context.TODO(), nil)
handle(err)
- // ===== 2. Uploading/downloading a block blob =====
- // We'll specify our data up-front, rather than reading a file for simplicity's sake.
- data := "Hello world!"
+ // ===== 2. Upload and Download a block blob =====
+ uploadData := "Hello world!"
- // Branch off of the container into a block blob client
- blockBlob := container.NewBlockBlobClient("HelloWorld.txt")
+ // Create a new blockBlobClient from the containerClient
+ blockBlobClient, err := containerClient.NewBlockBlobClient("HelloWorld.txt")
+ handle(err)
// Upload data to the block blob
- _, err = blockBlob.Upload(ctx, NopCloser(strings.NewReader(data)), nil)
+ blockBlobUploadOptions := azblob.BlockBlobUploadOptions{
+ Metadata: map[string]string{"Foo": "Bar"},
+ TagsMap: map[string]string{"Year": "2022"},
+ }
+ _, err = blockBlobClient.Upload(context.TODO(), streaming.NopCloser(strings.NewReader(uploadData)), &blockBlobUploadOptions)
handle(err)
// Download the blob's contents and ensure that the download worked properly
- get, err := blockBlob.Download(ctx, nil)
+ blobDownloadResponse, err := blockBlobClient.Download(context.TODO(), nil)
handle(err)
- // Open a buffer, reader, and then download!
- downloadedData := &bytes.Buffer{}
- reader := get.Body(RetryReaderOptions{}) // RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here.
- _, err = downloadedData.ReadFrom(reader)
- handle(err)
- err = reader.Close()
+ // Use the bytes.Buffer object to read the downloaded data.
+ // RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here.
+ reader := blobDownloadResponse.Body(nil)
+ downloadData, err := ioutil.ReadAll(reader)
handle(err)
- if data != downloadedData.String() {
- handle(errors.New("downloaded data doesn't match uploaded data"))
+ if string(downloadData) != uploadData {
+ handle(errors.New("Uploaded data should be same as downloaded data"))
}
- // ===== 3. list blobs =====
- // The ListBlobs and ListContainers APIs return two channels, a values channel, and an errors channel.
- // You should enumerate on a range over the values channel, and then check the errors channel, as only ONE value will ever be passed to the errors channel.
- // The AutoPagerTimeout defines how long it will wait to place into the items channel before it exits & cleans itself up. A zero time will result in no timeout.
- pager := container.ListBlobsFlat(nil)
- for pager.NextPage(ctx) {
- resp := pager.PageResponse()
+ if err = reader.Close(); err != nil {
+ handle(err)
+ return
+ }
- for _, v := range resp.ContainerListBlobFlatSegmentResult.Segment.BlobItems {
+ // ===== 3. List blobs =====
+ // List methods returns a pager object which can be used to iterate over the results of a paging operation.
+ // To iterate over a page use the NextPage(context.Context) to fetch the next page of results.
+ // PageResponse() can be used to iterate over the results of the specific page.
+ // Always check the Err() method after paging to see if an error was returned by the pager. A pager will return either an error or the page of results.
+ pager := containerClient.ListBlobsFlat(nil)
+ for pager.NextPage(context.TODO()) {
+ resp := pager.PageResponse()
+ for _, v := range resp.Segment.BlobItems {
fmt.Println(*v.Name)
}
}
@@ -196,12 +202,12 @@ Examples
handle(err)
}
- // Delete the blob we created earlier.
- _, err = blockBlob.Delete(ctx, nil)
+ // Delete the blob.
+ _, err = blockBlobClient.Delete(context.TODO(), nil)
handle(err)
- // Delete the container we created earlier.
- _, err = container.Delete(ctx, nil)
+ // Delete the container.
+ _, err = containerClient.Delete(context.TODO(), nil)
handle(err)
*/
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go
index d602beda444..28725003981 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -6,7 +9,6 @@ package azblob
import (
"context"
"encoding/base64"
- "fmt"
"io"
"net/http"
"sync"
@@ -20,76 +22,8 @@ import (
"os"
)
-// HighLevelUploadToBlockBlobOption identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions.
-type HighLevelUploadToBlockBlobOption struct {
- // BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes.
- BlockSize int64
-
- // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient.
- // Note that the progress reporting is not always increasing; it can go down when retrying a request.
- Progress func(bytesTransferred int64)
-
- // HTTPHeaders indicates the HTTP headers to be associated with the blob.
- HTTPHeaders *BlobHTTPHeaders
-
- // Metadata indicates the metadata to be associated with the blob when PutBlockList is called.
- Metadata map[string]string
-
- // BlobAccessConditions indicates the access conditions for the block blob.
- BlobAccessConditions *BlobAccessConditions
-
- // AccessTier indicates the tier of blob
- AccessTier *AccessTier
-
- // TagsMap
- TagsMap map[string]string
-
- // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
-
- // Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
- Parallelism uint16
- // Optional header, Specifies the transactional crc64 for the body, to be validated by the service.
- TransactionalContentCRC64 *[]byte
- // Specify the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 *[]byte
-}
-
-func (o HighLevelUploadToBlockBlobOption) getStageBlockOptions() *StageBlockOptions {
- leaseAccessConditions, _ := o.BlobAccessConditions.pointers()
- return &StageBlockOptions{
- CpkInfo: o.CpkInfo,
- CpkScopeInfo: o.CpkScopeInfo,
- LeaseAccessConditions: leaseAccessConditions,
- }
-}
-
-func (o HighLevelUploadToBlockBlobOption) getUploadBlockBlobOptions() *UploadBlockBlobOptions {
- return &UploadBlockBlobOptions{
- TagsMap: o.TagsMap,
- Metadata: o.Metadata,
- Tier: o.AccessTier,
- HTTPHeaders: o.HTTPHeaders,
- BlobAccessConditions: o.BlobAccessConditions,
- CpkInfo: o.CpkInfo,
- CpkScopeInfo: o.CpkScopeInfo,
- }
-}
-
-func (o *HighLevelUploadToBlockBlobOption) getCommitBlockListOptions() *CommitBlockListOptions {
- return &CommitBlockListOptions{
- BlobTagsMap: o.TagsMap,
- Metadata: o.Metadata,
- Tier: o.AccessTier,
- BlobHTTPHeaders: o.HTTPHeaders,
- CpkInfo: o.CpkInfo,
- CpkScopeInfo: o.CpkScopeInfo,
- }
-}
-
// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob.
-func (bb BlockBlobClient) uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64, o HighLevelUploadToBlockBlobOption) (*http.Response, error) {
+func (bb *BlockBlobClient) uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64, o UploadOption) (*http.Response, error) {
if o.BlockSize == 0 {
// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
@@ -172,13 +106,13 @@ func (bb BlockBlobClient) uploadReaderAtToBlockBlob(ctx context.Context, reader
return resp.RawResponse, err
}
-// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
-func (bb BlockBlobClient) UploadBufferToBlockBlob(ctx context.Context, b []byte, o HighLevelUploadToBlockBlobOption) (*http.Response, error) {
+// UploadBuffer uploads a buffer in blocks to a block blob.
+func (bb *BlockBlobClient) UploadBuffer(ctx context.Context, b []byte, o UploadOption) (*http.Response, error) {
return bb.uploadReaderAtToBlockBlob(ctx, bytes.NewReader(b), int64(len(b)), o)
}
-// UploadFileToBlockBlob uploads a file in blocks to a block blob.
-func (bb BlockBlobClient) UploadFileToBlockBlob(ctx context.Context, file *os.File, o HighLevelUploadToBlockBlobOption) (*http.Response, error) {
+// UploadFile uploads a file in blocks to a block blob.
+func (bb *BlockBlobClient) UploadFile(ctx context.Context, file *os.File, o UploadOption) (*http.Response, error) {
stat, err := file.Stat()
if err != nil {
@@ -187,53 +121,33 @@ func (bb BlockBlobClient) UploadFileToBlockBlob(ctx context.Context, file *os.Fi
return bb.uploadReaderAtToBlockBlob(ctx, file, stat.Size(), o)
}
-///////////////////////////////////////////////////////////////////////////////
-
-const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
-
-// HighLevelDownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
-type HighLevelDownloadFromBlobOptions struct {
- // BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
- BlockSize int64
-
- // Progress is a function that is invoked periodically as bytes are received.
- Progress func(bytesTransferred int64)
-
- // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
- BlobAccessConditions *BlobAccessConditions
+// ---------------------------------------------------------------------------------------------------------------------
- // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
-
- // Parallelism indicates the maximum number of blocks to download in parallel (0=default)
- Parallelism uint16
-
- // RetryReaderOptionsPerBlock is used when downloading each block.
- RetryReaderOptionsPerBlock RetryReaderOptions
-}
+// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient.
+// A Context deadline or cancellation will cause this to error.
+func (bb *BlockBlobClient) UploadStream(ctx context.Context, body io.Reader, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) {
+ if err := o.defaults(); err != nil {
+ return BlockBlobCommitBlockListResponse{}, err
+ }
-func (o *HighLevelDownloadFromBlobOptions) getBlobPropertiesOptions() *GetBlobPropertiesOptions {
- return &GetBlobPropertiesOptions{
- BlobAccessConditions: o.BlobAccessConditions,
- CpkInfo: o.CpkInfo,
+ // If we used the default manager, we need to close it.
+ if o.transferMangerNotSet {
+ defer o.TransferManager.Close()
}
-}
-func (o *HighLevelDownloadFromBlobOptions) getDownloadBlobOptions(offSet, count int64, rangeGetContentMD5 *bool) *DownloadBlobOptions {
- return &DownloadBlobOptions{
- BlobAccessConditions: o.BlobAccessConditions,
- CpkInfo: o.CpkInfo,
- CpkScopeInfo: o.CpkScopeInfo,
- Offset: &offSet,
- Count: &count,
- RangeGetContentMD5: rangeGetContentMD5,
+ result, err := copyFromReader(ctx, body, bb, o)
+ if err != nil {
+ return BlockBlobCommitBlockListResponse{}, err
}
+
+ return result, nil
}
-// DownloadBlobToWriterAt downloads an Azure blob to a WriterAt with parallel.
+// ---------------------------------------------------------------------------------------------------------------------
+
+// DownloadToWriterAt downloads an Azure blob to a WriterAt with parallel.
// Offset and count are optional, pass 0 for both to download the entire blob.
-func (b BlobClient) DownloadBlobToWriterAt(ctx context.Context, offset int64, count int64, writer io.WriterAt, o HighLevelDownloadFromBlobOptions) error {
+func (b *BlobClient) DownloadToWriterAt(ctx context.Context, offset int64, count int64, writer io.WriterAt, o DownloadOptions) error {
if o.BlockSize == 0 {
o.BlockSize = BlobDefaultDownloadBlockSize
}
@@ -297,16 +211,16 @@ func (b BlobClient) DownloadBlobToWriterAt(ctx context.Context, offset int64, co
return nil
}
-// DownloadBlobToBuffer downloads an Azure blob to a buffer with parallel.
+// DownloadToBuffer downloads an Azure blob to a buffer with parallel.
// Offset and count are optional, pass 0 for both to download the entire blob.
-func (b BlobClient) DownloadBlobToBuffer(ctx context.Context, offset int64, count int64, _bytes []byte, o HighLevelDownloadFromBlobOptions) error {
- return b.DownloadBlobToWriterAt(ctx, offset, count, newBytesWriter(_bytes), o)
+func (b *BlobClient) DownloadToBuffer(ctx context.Context, offset int64, count int64, _bytes []byte, o DownloadOptions) error {
+ return b.DownloadToWriterAt(ctx, offset, count, newBytesWriter(_bytes), o)
}
-// DownloadBlobToFile downloads an Azure blob to a local file.
+// DownloadToFile downloads an Azure blob to a local file.
// The file would be truncated if the size doesn't match.
// Offset and count are optional, pass 0 for both to download the entire blob.
-func (b BlobClient) DownloadBlobToFile(ctx context.Context, offset int64, count int64, file *os.File, o HighLevelDownloadFromBlobOptions) error {
+func (b *BlobClient) DownloadToFile(ctx context.Context, offset int64, count int64, file *os.File, o DownloadOptions) error {
// 1. Calculate the size of the destination file
var size int64
@@ -334,22 +248,13 @@ func (b BlobClient) DownloadBlobToFile(ctx context.Context, offset int64, count
}
if size > 0 {
- return b.DownloadBlobToWriterAt(ctx, offset, size, file, o)
+ return b.DownloadToWriterAt(ctx, offset, size, file, o)
} else { // if the blob's size is 0, there is no need in downloading it
return nil
}
}
-///////////////////////////////////////////////////////////////////////////////
-
-// BatchTransferOptions identifies options used by DoBatchTransfer.
-type BatchTransferOptions struct {
- TransferSize int64
- ChunkSize int64
- Parallelism uint16
- Operation func(offset int64, chunkSize int64, ctx context.Context) error
- OperationName string
-}
+// ---------------------------------------------------------------------------------------------------------------------
// DoBatchTransfer helps to execute operations in a batch manner.
// Can be used by users to customize batch works (for other scenarios that the SDK does not provide)
@@ -408,208 +313,4 @@ func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
return firstErr
}
-////////////////////////////////////////////////////////////////////////////////////////////////
-
-// TransferManager provides a buffer and thread pool manager for certain transfer options.
-// It is undefined behavior if code outside of this package call any of these methods.
-type TransferManager interface {
- // Get provides a buffer that will be used to read data into and write out to the stream.
- // It is guaranteed by this package to not read or write beyond the size of the slice.
- Get() []byte
- // Put may or may not put the buffer into underlying storage, depending on settings.
- // The buffer must not be touched after this has been called.
- Put(b []byte) // nolint
- // Run will use a goroutine pool entry to run a function. This blocks until a pool
- // goroutine becomes available.
- Run(func())
- // Close shuts down all internal goroutines. This must be called when the TransferManager
- // will no longer be used. Not closing it will cause a goroutine leak.
- Close()
-}
-
-type staticBuffer struct {
- buffers chan []byte
- size int
- threadpool chan func()
-}
-
-// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer
-// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This
-// can be shared between calls if you wish to control maximum memory and concurrency with
-// multiple concurrent calls.
-func NewStaticBuffer(size, max int) (TransferManager, error) {
- if size < 1 || max < 1 {
- return nil, fmt.Errorf("cannot be called with size or max set to < 1")
- }
-
- if size < _1MiB {
- return nil, fmt.Errorf("cannot have size < 1MiB")
- }
-
- threadpool := make(chan func(), max)
- buffers := make(chan []byte, max)
- for i := 0; i < max; i++ {
- go func() {
- for f := range threadpool {
- f()
- }
- }()
-
- buffers <- make([]byte, size)
- }
- return staticBuffer{
- buffers: buffers,
- size: size,
- threadpool: threadpool,
- }, nil
-}
-
-// Get implements TransferManager.Get().
-func (s staticBuffer) Get() []byte {
- return <-s.buffers
-}
-
-// Put implements TransferManager.Put().
-func (s staticBuffer) Put(b []byte) { // nolint
- select {
- case s.buffers <- b:
- default: // This shouldn't happen, but just in case they call Put() with there own buffer.
- }
-}
-
-// Run implements TransferManager.Run().
-func (s staticBuffer) Run(f func()) {
- s.threadpool <- f
-}
-
-// Close implements TransferManager.Close().
-func (s staticBuffer) Close() {
- close(s.threadpool)
- close(s.buffers)
-}
-
-type syncPool struct {
- threadpool chan func()
- pool sync.Pool
-}
-
-// NewSyncPool creates a TransferManager that will use a sync.Pool
-// that can hold a non-capped number of buffers constrained by concurrency. This
-// can be shared between calls if you wish to share memory and concurrency.
-func NewSyncPool(size, concurrency int) (TransferManager, error) {
- if size < 1 || concurrency < 1 {
- return nil, fmt.Errorf("cannot be called with size or max set to < 1")
- }
-
- if size < _1MiB {
- return nil, fmt.Errorf("cannot have size < 1MiB")
- }
-
- threadpool := make(chan func(), concurrency)
- for i := 0; i < concurrency; i++ {
- go func() {
- for f := range threadpool {
- f()
- }
- }()
- }
-
- return &syncPool{
- threadpool: threadpool,
- pool: sync.Pool{
- New: func() interface{} {
- return make([]byte, size)
- },
- },
- }, nil
-}
-
-// Get implements TransferManager.Get().
-func (s *syncPool) Get() []byte {
- return s.pool.Get().([]byte)
-}
-
-// Put implements TransferManager.Put().
-// nolint
-func (s *syncPool) Put(b []byte) {
- s.pool.Put(b)
-}
-
-// Run implements TransferManager.Run().
-func (s *syncPool) Run(f func()) {
- s.threadpool <- f
-}
-
-// Close implements TransferManager.Close().
-func (s *syncPool) Close() {
- close(s.threadpool)
-}
-
-const _1MiB = 1024 * 1024
-
-type UploadStreamToBlockBlobOptions struct {
- // TransferManager provides a TransferManager that controls buffer allocation/reuse and
- // concurrency. This overrides BufferSize and MaxBuffers if set.
- TransferManager TransferManager
- transferMangerNotSet bool
- // BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB.
- BufferSize int
- // MaxBuffers defines the number of simultaneous uploads will be performed to upload the file.
- MaxBuffers int
- HTTPHeaders *BlobHTTPHeaders
- Metadata map[string]string
- BlobAccessConditions *BlobAccessConditions
- AccessTier *AccessTier
- BlobTagsMap map[string]string
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
-}
-
-func (u *UploadStreamToBlockBlobOptions) defaults() error {
- if u.TransferManager != nil {
- return nil
- }
-
- if u.MaxBuffers == 0 {
- u.MaxBuffers = 1
- }
-
- if u.BufferSize < _1MiB {
- u.BufferSize = _1MiB
- }
-
- var err error
- u.TransferManager, err = NewStaticBuffer(u.BufferSize, u.MaxBuffers)
- if err != nil {
- return fmt.Errorf("bug: default transfer manager could not be created: %s", err)
- }
- u.transferMangerNotSet = true
- return nil
-}
-func (u *UploadStreamToBlockBlobOptions) getStageBlockOptions() *StageBlockOptions {
- return &StageBlockOptions{}
-}
-
-func (u *UploadStreamToBlockBlobOptions) getCommitBlockListOptions() *CommitBlockListOptions {
- return &CommitBlockListOptions{}
-}
-
-// UploadStreamToBlockBlob copies the file held in io.Reader to the Blob at blockBlobClient.
-// A Context deadline or cancellation will cause this to error.
-func (bb BlockBlobClient) UploadStreamToBlockBlob(ctx context.Context, body io.Reader, o UploadStreamToBlockBlobOptions) (BlockBlobCommitBlockListResponse, error) {
- if err := o.defaults(); err != nil {
- return BlockBlobCommitBlockListResponse{}, err
- }
-
- // If we used the default manager, we need to close it.
- if o.transferMangerNotSet {
- defer o.TransferManager.Close()
- }
-
- result, err := copyFromReader(ctx, body, bb, o)
- if err != nil {
- return BlockBlobCommitBlockListResponse{}, err
- }
-
- return result, nil
-}
+// ---------------------------------------------------------------------------------------------------------------------
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go
index 733e16a26ec..cd2ada9b5db 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go
index c2609b0e999..d2e89f5b2a6 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go
new file mode 100644
index 00000000000..5c40e9bc2ab
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go
@@ -0,0 +1,154 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+ "fmt"
+ "sync"
+)
+
+// TransferManager provides a buffer and thread pool manager for certain transfer options.
+// It is undefined behavior if code outside this package call any of these methods.
+type TransferManager interface {
+ // Get provides a buffer that will be used to read data into and write out to the stream.
+ // It is guaranteed by this package to not read or write beyond the size of the slice.
+ Get() []byte
+
+ // Put may or may not put the buffer into underlying storage, depending on settings.
+ // The buffer must not be touched after this has been called.
+ Put(b []byte) // nolint
+
+ // Run will use a goroutine pool entry to run a function. This blocks until a pool
+ // goroutine becomes available.
+ Run(func())
+
+ // Close shuts down all internal goroutines. This must be called when the TransferManager
+ // will no longer be used. Not closing it will cause a goroutine leak.
+ Close()
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+type staticBuffer struct {
+ buffers chan []byte
+ size int
+ threadpool chan func()
+}
+
+// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer
+// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This
+// can be shared between calls if you wish to control maximum memory and concurrency with
+// multiple concurrent calls.
+func NewStaticBuffer(size, max int) (TransferManager, error) {
+ if size < 1 || max < 1 {
+ return nil, fmt.Errorf("cannot be called with size or max set to < 1")
+ }
+
+ if size < _1MiB {
+ return nil, fmt.Errorf("cannot have size < 1MiB")
+ }
+
+ threadpool := make(chan func(), max)
+ buffers := make(chan []byte, max)
+ for i := 0; i < max; i++ {
+ go func() {
+ for f := range threadpool {
+ f()
+ }
+ }()
+
+ buffers <- make([]byte, size)
+ }
+ return staticBuffer{
+ buffers: buffers,
+ size: size,
+ threadpool: threadpool,
+ }, nil
+}
+
+// Get implements TransferManager.Get().
+func (s staticBuffer) Get() []byte {
+ return <-s.buffers
+}
+
+// Put implements TransferManager.Put().
+func (s staticBuffer) Put(b []byte) { // nolint
+ select {
+ case s.buffers <- b:
+ default: // This shouldn't happen, but just in case they call Put() with there own buffer.
+ }
+}
+
+// Run implements TransferManager.Run().
+func (s staticBuffer) Run(f func()) {
+ s.threadpool <- f
+}
+
+// Close implements TransferManager.Close().
+func (s staticBuffer) Close() {
+ close(s.threadpool)
+ close(s.buffers)
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+type syncPool struct {
+ threadpool chan func()
+ pool sync.Pool
+}
+
+// NewSyncPool creates a TransferManager that will use a sync.Pool
+// that can hold a non-capped number of buffers constrained by concurrency. This
+// can be shared between calls if you wish to share memory and concurrency.
+func NewSyncPool(size, concurrency int) (TransferManager, error) {
+ if size < 1 || concurrency < 1 {
+ return nil, fmt.Errorf("cannot be called with size or max set to < 1")
+ }
+
+ if size < _1MiB {
+ return nil, fmt.Errorf("cannot have size < 1MiB")
+ }
+
+ threadpool := make(chan func(), concurrency)
+ for i := 0; i < concurrency; i++ {
+ go func() {
+ for f := range threadpool {
+ f()
+ }
+ }()
+ }
+
+ return &syncPool{
+ threadpool: threadpool,
+ pool: sync.Pool{
+ New: func() interface{} {
+ return make([]byte, size)
+ },
+ },
+ }, nil
+}
+
+// Get implements TransferManager.Get().
+func (s *syncPool) Get() []byte {
+ return s.pool.Get().([]byte)
+}
+
+// Put implements TransferManager.Put().
+// nolint
+func (s *syncPool) Put(b []byte) {
+ s.pool.Put(b)
+}
+
+// Run implements TransferManager.Run().
+func (s *syncPool) Run(f func()) {
+ s.threadpool <- f
+}
+
+// Close implements TransferManager.Close().
+func (s *syncPool) Close() {
+ close(s.threadpool)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/version.go
deleted file mode 100644
index dc86674044b..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/version.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-const serviceLibVersion = "0.1" //nolint
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go
index 1286568515b..612bc784c37 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -8,7 +11,7 @@ import (
"fmt"
)
-// The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
+// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
type AccessPolicyPermission struct {
Read, Add, Create, Write, Delete, List bool
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go
index d0896ede50c..25490ab5950 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -11,107 +14,141 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
+// AppendBlobClient represents a client to an Azure Storage append blob;
type AppendBlobClient struct {
BlobClient
client *appendBlobClient
}
// NewAppendBlobClient creates an AppendBlobClient with the specified URL, Azure AD credential, and options.
-func NewAppendBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (AppendBlobClient, error) {
+func NewAppendBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*AppendBlobClient, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
- con := newConnection(blobURL, authPolicy, options.getConnectionOptions())
- return AppendBlobClient{
- client: &appendBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+ conOptions := getConnectionOptions(options)
+ conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+ conn := newConnection(blobURL, conOptions)
+
+ return &AppendBlobClient{
+ client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()),
+ BlobClient: BlobClient{
+ client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
+ },
}, nil
}
// NewAppendBlobClientWithNoCredential creates an AppendBlobClient with the specified URL and options.
-func NewAppendBlobClientWithNoCredential(blobURL string, options *ClientOptions) (AppendBlobClient, error) {
- con := newConnection(blobURL, nil, options.getConnectionOptions())
- return AppendBlobClient{
- client: &appendBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+func NewAppendBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*AppendBlobClient, error) {
+ conOptions := getConnectionOptions(options)
+ conn := newConnection(blobURL, conOptions)
+
+ return &AppendBlobClient{
+ client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()),
+ BlobClient: BlobClient{
+ client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
+ },
}, nil
}
// NewAppendBlobClientWithSharedKey creates an AppendBlobClient with the specified URL, shared key, and options.
-func NewAppendBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (AppendBlobClient, error) {
+func NewAppendBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*AppendBlobClient, error) {
authPolicy := newSharedKeyCredPolicy(cred)
- con := newConnection(blobURL, authPolicy, options.getConnectionOptions())
- return AppendBlobClient{
- client: &appendBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+ conOptions := getConnectionOptions(options)
+ conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+ conn := newConnection(blobURL, conOptions)
+
+ return &AppendBlobClient{
+ client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()),
+ BlobClient: BlobClient{
+ client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
+ sharedKey: cred,
+ },
}, nil
}
// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
-func (ab AppendBlobClient) WithSnapshot(snapshot string) AppendBlobClient {
- p := NewBlobURLParts(ab.URL())
- p.Snapshot = snapshot
- con := &connection{u: p.URL(), p: ab.client.con.p}
-
- return AppendBlobClient{
- client: &appendBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+func (ab *AppendBlobClient) WithSnapshot(snapshot string) (*AppendBlobClient, error) {
+ p, err := NewBlobURLParts(ab.URL())
+ if err != nil {
+ return nil, err
}
+
+ p.Snapshot = snapshot
+ endpoint := p.URL()
+ pipeline := ab.client.pl
+
+ return &AppendBlobClient{
+ client: newAppendBlobClient(endpoint, pipeline),
+ BlobClient: BlobClient{
+ client: newBlobClient(endpoint, pipeline),
+ sharedKey: ab.sharedKey,
+ },
+ }, nil
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
-func (ab AppendBlobClient) WithVersionID(versionID string) AppendBlobClient {
- p := NewBlobURLParts(ab.URL())
- p.VersionID = versionID
- con := &connection{u: p.URL(), p: ab.client.con.p}
-
- return AppendBlobClient{
- client: &appendBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+func (ab *AppendBlobClient) WithVersionID(versionID string) (*AppendBlobClient, error) {
+ p, err := NewBlobURLParts(ab.URL())
+ if err != nil {
+ return nil, err
}
+
+ p.VersionID = versionID
+ endpoint := p.URL()
+ pipeline := ab.client.pl
+
+ return &AppendBlobClient{
+ client: newAppendBlobClient(endpoint, pipeline),
+ BlobClient: BlobClient{
+ client: newBlobClient(endpoint, pipeline),
+ sharedKey: ab.sharedKey,
+ },
+ }, nil
}
// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
-func (ab AppendBlobClient) Create(ctx context.Context, options *CreateAppendBlobOptions) (AppendBlobCreateResponse, error) {
- appendBlobAppendBlockOptions, blobHttpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.pointers()
- resp, err := ab.client.Create(ctx, 0, appendBlobAppendBlockOptions, blobHttpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+func (ab *AppendBlobClient) Create(ctx context.Context, options *AppendBlobCreateOptions) (AppendBlobCreateResponse, error) {
+ appendBlobAppendBlockOptions, blobHttpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format()
+
+ resp, err := ab.client.Create(ctx, 0, appendBlobAppendBlockOptions, blobHttpHeaders,
+ leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
- return resp, handleError(err)
+ return toAppendBlobCreateResponse(resp), handleError(err)
}
// AppendBlock writes a stream to a new block of data to the end of the existing append blob.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
-func (ab AppendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeekCloser, options *AppendBlockOptions) (AppendBlobAppendBlockResponse, error) {
+func (ab *AppendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeekCloser, options *AppendBlobAppendBlockOptions) (AppendBlobAppendBlockResponse, error) {
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return AppendBlobAppendBlockResponse{}, nil
}
- appendOptions, aac, cpkinfo, cpkscope, mac, lac := options.pointers()
+ appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format()
- resp, err := ab.client.AppendBlock(ctx, count, body, appendOptions, lac, aac, cpkinfo, cpkscope, mac)
+ resp, err := ab.client.AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions)
- return resp, handleError(err)
+ return toAppendBlobAppendBlockResponse(resp), handleError(err)
}
// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
-func (ab AppendBlobClient) AppendBlockFromURL(ctx context.Context, source string, options *AppendBlockURLOptions) (AppendBlobAppendBlockFromURLResponse, error) {
- appendOptions, aac, cpkinfo, cpkscope, mac, lac, smac := options.pointers()
+func (ab *AppendBlobClient) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlobAppendBlockFromURLOptions) (AppendBlobAppendBlockFromURLResponse, error) {
+ appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := o.format()
// content length should be 0 on * from URL. always. It's a 400 if it isn't.
- resp, err := ab.client.AppendBlockFromURL(ctx, source, 0, appendOptions, cpkinfo, cpkscope, lac, aac, mac, smac)
-
- return resp, handleError(err)
+ resp, err := ab.client.AppendBlockFromURL(ctx, source, 0, appendBlockFromURLOptions, cpkInfo, cpkScopeInfo,
+ leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
+ return toAppendBlobAppendBlockFromURLResponse(resp), handleError(err)
}
// SealAppendBlob - The purpose of Append Blob Seal is to allow users and applications to seal append blobs, marking them as read only.
// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal
-func (ab AppendBlobClient) SealAppendBlob(ctx context.Context, options *SealAppendBlobOptions) (AppendBlobSealResponse, error) {
- leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := options.pointers()
+func (ab *AppendBlobClient) SealAppendBlob(ctx context.Context, options *AppendBlobSealOptions) (AppendBlobSealResponse, error) {
+ leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := options.format()
resp, err := ab.client.Seal(ctx, nil, leaseAccessConditions, modifiedAccessConditions, positionAccessConditions)
- return resp, handleError(err)
+ return toAppendBlobSealResponse(resp), handleError(err)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go
index 8a2fd75e8e2..9543d14f877 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -12,82 +15,101 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
-// A BlobClient represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
+// BlobClient represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
type BlobClient struct {
client *blobClient
sharedKey *SharedKeyCredential
}
// NewBlobClient creates a BlobClient object using the specified URL, Azure AD credential, and options.
-func NewBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (BlobClient, error) {
+func NewBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlobClient, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
- con := newConnection(blobURL, authPolicy, options.getConnectionOptions())
+ conOptions := getConnectionOptions(options)
+ conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+ conn := newConnection(blobURL, conOptions)
- return BlobClient{client: &blobClient{con, nil}}, nil
+ return &BlobClient{
+ client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
+ }, nil
}
// NewBlobClientWithNoCredential creates a BlobClient object using the specified URL and options.
-func NewBlobClientWithNoCredential(blobURL string, options *ClientOptions) (BlobClient, error) {
- con := newConnection(blobURL, nil, options.getConnectionOptions())
+func NewBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlobClient, error) {
+ conOptions := getConnectionOptions(options)
+ conn := newConnection(blobURL, conOptions)
- return BlobClient{client: &blobClient{con, nil}}, nil
+ return &BlobClient{
+ client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
+ }, nil
}
// NewBlobClientWithSharedKey creates a BlobClient object using the specified URL, shared key, and options.
-func NewBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (BlobClient, error) {
+func NewBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlobClient, error) {
authPolicy := newSharedKeyCredPolicy(cred)
- con := newConnection(blobURL, authPolicy, options.getConnectionOptions())
-
- return BlobClient{client: &blobClient{con, nil}, sharedKey: cred}, nil
+ conOptions := getConnectionOptions(options)
+ conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+ conn := newConnection(blobURL, conOptions)
+
+ return &BlobClient{
+ client: newBlobClient(blobURL, conn.Pipeline()),
+ sharedKey: cred,
+ }, nil
}
-// NewBlobClientFromConnectionString creates BlobClient from a Connection String
+// NewBlobClientFromConnectionString creates BlobClient from a connection String
//nolint
-func NewBlobClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (BlobClient, error) {
+func NewBlobClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*BlobClient, error) {
containerClient, err := NewContainerClientFromConnectionString(connectionString, containerName, options)
if err != nil {
- return BlobClient{}, err
+ return nil, err
}
- return containerClient.NewBlobClient(blobName), nil
+ return containerClient.NewBlobClient(blobName)
}
// URL returns the URL endpoint used by the BlobClient object.
-func (b BlobClient) URL() string {
- return b.client.con.u
+func (b *BlobClient) URL() string {
+ return b.client.endpoint
}
// WithSnapshot creates a new BlobClient object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
-func (b BlobClient) WithSnapshot(snapshot string) BlobClient {
- p := NewBlobURLParts(b.URL())
- p.Snapshot = snapshot
- return BlobClient{
- client: &blobClient{
- &connection{u: p.URL(), p: b.client.con.p},
- b.client.pathRenameMode,
- },
+func (b *BlobClient) WithSnapshot(snapshot string) (*BlobClient, error) {
+ p, err := NewBlobURLParts(b.URL())
+ if err != nil {
+ return nil, err
}
+ p.Snapshot = snapshot
+
+ pipeline := b.client.pl
+ return &BlobClient{
+ client: newBlobClient(p.URL(), pipeline),
+ sharedKey: b.sharedKey,
+ }, nil
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
-func (b BlobClient) WithVersionID(versionID string) BlockBlobClient {
- p := NewBlobURLParts(b.URL())
- p.VersionID = versionID
- con := &connection{u: p.URL(), p: b.client.con.p}
- return BlockBlobClient{
- client: &blockBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+func (b *BlobClient) WithVersionID(versionID string) (*BlobClient, error) {
+ p, err := NewBlobURLParts(b.URL())
+ if err != nil {
+ return nil, err
}
+ p.VersionID = versionID
+
+ pipeline := b.client.pl
+ return &BlobClient{
+ client: newBlobClient(p.URL(), pipeline),
+ sharedKey: b.sharedKey,
+ }, nil
}
// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
-func (b BlobClient) Download(ctx context.Context, options *DownloadBlobOptions) (*DownloadResponse, error) {
- o, lease, cpk, accessConditions := options.pointers()
+func (b *BlobClient) Download(ctx context.Context, options *BlobDownloadOptions) (BlobDownloadResponse, error) {
+ o, lease, cpk, accessConditions := options.format()
dr, err := b.client.Download(ctx, o, lease, cpk, accessConditions)
if err != nil {
- return nil, handleError(err)
+ return BlobDownloadResponse{}, handleError(err)
}
offset := int64(0)
@@ -100,31 +122,37 @@ func (b BlobClient) Download(ctx context.Context, options *DownloadBlobOptions)
if options != nil && options.Count != nil {
count = *options.Count
}
- return &DownloadResponse{
- b: b,
- BlobDownloadResponse: dr,
- ctx: ctx,
- getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: *dr.ETag},
- ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules),
+
+ eTag := ""
+ if dr.ETag != nil {
+ eTag = *dr.ETag
+ }
+ return BlobDownloadResponse{
+ b: b,
+ blobClientDownloadResponse: dr,
+ ctx: ctx,
+ getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: eTag},
+ ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules),
}, err
}
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
-func (b BlobClient) Delete(ctx context.Context, options *DeleteBlobOptions) (BlobDeleteResponse, error) {
- basics, leaseInfo, accessConditions := options.pointers()
+func (b *BlobClient) Delete(ctx context.Context, o *BlobDeleteOptions) (BlobDeleteResponse, error) {
+ basics, leaseInfo, accessConditions := o.format()
resp, err := b.client.Delete(ctx, basics, leaseInfo, accessConditions)
- return resp, handleError(err)
+ return toBlobDeleteResponse(resp), handleError(err)
}
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
-func (b BlobClient) Undelete(ctx context.Context) (BlobUndeleteResponse, error) {
- resp, err := b.client.Undelete(ctx, nil)
+func (b *BlobClient) Undelete(ctx context.Context, o *BlobUndeleteOptions) (BlobUndeleteResponse, error) {
+ undeleteOptions := o.format()
+ resp, err := b.client.Undelete(ctx, undeleteOptions)
- return resp, handleError(err)
+ return toBlobUndeleteResponse(resp), handleError(err)
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
@@ -133,98 +161,98 @@ func (b BlobClient) Undelete(ctx context.Context) (BlobUndeleteResponse, error)
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
-func (b BlobClient) SetTier(ctx context.Context, tier AccessTier, options *SetTierOptions) (BlobSetTierResponse, error) {
- basics, lease, accessConditions := options.pointers()
+func (b *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobSetTierOptions) (BlobSetTierResponse, error) {
+ basics, lease, accessConditions := options.format()
resp, err := b.client.SetTier(ctx, tier, basics, lease, accessConditions)
- return resp, handleError(err)
+ return toBlobSetTierResponse(resp), handleError(err)
}
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
-func (b BlobClient) GetProperties(ctx context.Context, options *GetBlobPropertiesOptions) (GetBlobPropertiesResponse, error) {
- basics, lease, cpk, access := options.pointers()
+func (b *BlobClient) GetProperties(ctx context.Context, options *BlobGetPropertiesOptions) (BlobGetPropertiesResponse, error) {
+ basics, lease, cpk, access := options.format()
resp, err := b.client.GetProperties(ctx, basics, lease, cpk, access)
- return resp.deserializeAttributes(), handleError(err)
+ return toGetBlobPropertiesResponse(resp), handleError(err)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
-func (b BlobClient) SetHTTPHeaders(ctx context.Context, blobHttpHeaders BlobHTTPHeaders, options *SetBlobHTTPHeadersOptions) (BlobSetHTTPHeadersResponse, error) {
- basics, lease, access := options.pointers()
+func (b *BlobClient) SetHTTPHeaders(ctx context.Context, blobHttpHeaders BlobHTTPHeaders, options *BlobSetHTTPHeadersOptions) (BlobSetHTTPHeadersResponse, error) {
+ basics, lease, access := options.format()
resp, err := b.client.SetHTTPHeaders(ctx, basics, &blobHttpHeaders, lease, access)
- return resp, handleError(err)
+ return toBlobSetHTTPHeadersResponse(resp), handleError(err)
}
// SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
-func (b BlobClient) SetMetadata(ctx context.Context, metadata map[string]string, options *SetBlobMetadataOptions) (BlobSetMetadataResponse, error) {
- lease, cpk, cpkScope, access := options.pointers()
- basics := BlobSetMetadataOptions{
+func (b *BlobClient) SetMetadata(ctx context.Context, metadata map[string]string, options *BlobSetMetadataOptions) (BlobSetMetadataResponse, error) {
+ basics := blobClientSetMetadataOptions{
Metadata: metadata,
}
+ lease, cpk, cpkScope, access := options.format()
resp, err := b.client.SetMetadata(ctx, &basics, lease, cpk, cpkScope, access)
- return resp, handleError(err)
+ return toBlobSetMetadataResponse(resp), handleError(err)
}
// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
-func (b BlobClient) CreateSnapshot(ctx context.Context, options *CreateBlobSnapshotOptions) (BlobCreateSnapshotResponse, error) {
+func (b *BlobClient) CreateSnapshot(ctx context.Context, options *BlobCreateSnapshotOptions) (BlobCreateSnapshotResponse, error) {
// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
- // because checking this would be a performance hit for a VERY unusual path and we don't think the common case should suffer this
+ // because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this
// performance hit.
- basics, cpk, cpkScope, access, lease := options.pointers()
+ basics, cpk, cpkScope, access, lease := options.format()
resp, err := b.client.CreateSnapshot(ctx, basics, cpk, cpkScope, access, lease)
- return resp, handleError(err)
+ return toBlobCreateSnapshotResponse(resp), handleError(err)
}
// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
-func (b BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *StartCopyBlobOptions) (BlobStartCopyFromURLResponse, error) {
- basics, srcAccess, destAccess, lease := options.pointers()
+func (b *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobStartCopyOptions) (BlobStartCopyFromURLResponse, error) {
+ basics, srcAccess, destAccess, lease := options.format()
resp, err := b.client.StartCopyFromURL(ctx, copySource, basics, srcAccess, destAccess, lease)
- return resp, handleError(err)
+ return toBlobStartCopyFromURLResponse(resp), handleError(err)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
-func (b BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *AbortCopyBlobOptions) (BlobAbortCopyFromURLResponse, error) {
- basics, lease := options.pointers()
+func (b *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobAbortCopyOptions) (BlobAbortCopyFromURLResponse, error) {
+ basics, lease := options.format()
resp, err := b.client.AbortCopyFromURL(ctx, copyID, basics, lease)
- return resp, handleError(err)
+ return toBlobAbortCopyFromURLResponse(resp), handleError(err)
}
// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot.
// Each call to this operation replaces all existing tags attached to the blob.
// To remove all tags from the blob, call this operation with no tags set.
// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
-func (b BlobClient) SetTags(ctx context.Context, options *SetTagsBlobOptions) (BlobSetTagsResponse, error) {
- blobSetTagsOptions, modifiedAccessConditions := options.pointers()
- resp, err := b.client.SetTags(ctx, blobSetTagsOptions, modifiedAccessConditions)
+func (b *BlobClient) SetTags(ctx context.Context, options *BlobSetTagsOptions) (BlobSetTagsResponse, error) {
+ blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format()
+ resp, err := b.client.SetTags(ctx, blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions)
- return resp, handleError(err)
+ return toBlobSetTagsResponse(resp), handleError(err)
}
// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot.
// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
-func (b BlobClient) GetTags(ctx context.Context, options *GetTagsBlobOptions) (BlobGetTagsResponse, error) {
- blobGetTagsOptions, modifiedAccessConditions := options.pointers()
- resp, err := b.client.GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions)
+func (b *BlobClient) GetTags(ctx context.Context, options *BlobGetTagsOptions) (BlobGetTagsResponse, error) {
+ blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format()
+ resp, err := b.client.GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions)
- return resp, handleError(err)
+ return toBlobGetTagsResponse(resp), handleError(err)
}
// GetSASToken is a convenience method for generating a SAS token for the currently pointed at blob.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
-func (b BlobClient) GetSASToken(permissions BlobSASPermissions, start time.Time, expiry time.Time) (SASQueryParameters, error) {
- urlParts := NewBlobURLParts(b.URL())
+func (b *BlobClient) GetSASToken(permissions BlobSASPermissions, start time.Time, expiry time.Time) (SASQueryParameters, error) {
+ urlParts, _ := NewBlobURLParts(b.URL())
t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot)
@@ -235,6 +263,7 @@ func (b BlobClient) GetSASToken(permissions BlobSASPermissions, start time.Time,
if b.sharedKey == nil {
return SASQueryParameters{}, errors.New("credential is not a SharedKeyCredential. SAS can only be signed with a SharedKeyCredential")
}
+
return BlobSASSignatureValues{
ContainerName: urlParts.ContainerName,
BlobName: urlParts.BlobName,
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go
index c9ac3ae45ce..a9273dfb62c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -11,74 +14,76 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
+// BlobLeaseClient represents lease client on blob
type BlobLeaseClient struct {
BlobClient
leaseID *string
}
-func (b BlobClient) NewBlobLeaseClient(leaseID *string) (BlobLeaseClient, error) {
+// NewBlobLeaseClient is constructor for BlobLeaseClient
+func (b *BlobClient) NewBlobLeaseClient(leaseID *string) (*BlobLeaseClient, error) {
if leaseID == nil {
generatedUuid, err := uuid.New()
if err != nil {
- return BlobLeaseClient{}, err
+ return nil, err
}
- leaseID = to.StringPtr(generatedUuid.String())
+ leaseID = to.Ptr(generatedUuid.String())
}
- return BlobLeaseClient{
- BlobClient: b,
+ return &BlobLeaseClient{
+ BlobClient: *b,
leaseID: leaseID,
}, nil
}
-// AcquireLease acquires a lease on the blob for write and delete operations. The lease Duration must be between
-// 15 to 60 seconds, or infinite (-1).
+// AcquireLease acquires a lease on the blob for write and delete operations.
+//The lease Duration must be between 15 and 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
-func (blc *BlobLeaseClient) AcquireLease(ctx context.Context, options *AcquireLeaseBlobOptions) (BlobAcquireLeaseResponse, error) {
- blobAcquireLeaseOptions, modifiedAccessConditions := options.pointers()
+func (blc *BlobLeaseClient) AcquireLease(ctx context.Context, options *BlobAcquireLeaseOptions) (BlobAcquireLeaseResponse, error) {
+ blobAcquireLeaseOptions, modifiedAccessConditions := options.format()
blobAcquireLeaseOptions.ProposedLeaseID = blc.leaseID
- resp, err := blc.client.AcquireLease(ctx, blobAcquireLeaseOptions, modifiedAccessConditions)
- return resp, handleError(err)
+ resp, err := blc.client.AcquireLease(ctx, &blobAcquireLeaseOptions, modifiedAccessConditions)
+ return toBlobAcquireLeaseResponse(resp), handleError(err)
}
// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
// constant to break a fixed-Duration lease when it expires or an infinite lease immediately.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
-func (blc *BlobLeaseClient) BreakLease(ctx context.Context, options *BreakLeaseBlobOptions) (BlobBreakLeaseResponse, error) {
- blobBreakLeaseOptions, modifiedAccessConditions := options.pointers()
+func (blc *BlobLeaseClient) BreakLease(ctx context.Context, options *BlobBreakLeaseOptions) (BlobBreakLeaseResponse, error) {
+ blobBreakLeaseOptions, modifiedAccessConditions := options.format()
resp, err := blc.client.BreakLease(ctx, blobBreakLeaseOptions, modifiedAccessConditions)
- return resp, handleError(err)
+ return toBlobBreakLeaseResponse(resp), handleError(err)
}
// ChangeLease changes the blob's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
-func (blc *BlobLeaseClient) ChangeLease(ctx context.Context, options *ChangeLeaseBlobOptions) (BlobChangeLeaseResponse, error) {
+func (blc *BlobLeaseClient) ChangeLease(ctx context.Context, options *BlobChangeLeaseOptions) (BlobChangeLeaseResponse, error) {
if blc.leaseID == nil {
return BlobChangeLeaseResponse{}, errors.New("leaseID cannot be nil")
}
- proposedLeaseID, modifiedAccessConditions, err := options.pointers()
+ proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format()
if err != nil {
return BlobChangeLeaseResponse{}, err
}
- resp, err := blc.client.ChangeLease(ctx, *blc.leaseID, *proposedLeaseID, nil, modifiedAccessConditions)
+ resp, err := blc.client.ChangeLease(ctx, *blc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions)
// If lease has been changed successfully, set the leaseID in client
if err == nil {
blc.leaseID = proposedLeaseID
}
- return resp, handleError(err)
+ return toBlobChangeLeaseResponse(resp), handleError(err)
}
// RenewLease renews the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
-func (blc *BlobLeaseClient) RenewLease(ctx context.Context, options *RenewLeaseBlobOptions) (BlobRenewLeaseResponse, error) {
+func (blc *BlobLeaseClient) RenewLease(ctx context.Context, options *BlobRenewLeaseOptions) (BlobRenewLeaseResponse, error) {
if blc.leaseID == nil {
return BlobRenewLeaseResponse{}, errors.New("leaseID cannot be nil")
}
- renewLeaseBlobOptions, modifiedAccessConditions := options.pointers()
+ renewLeaseBlobOptions, modifiedAccessConditions := options.format()
resp, err := blc.client.RenewLease(ctx, *blc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions)
- return resp, handleError(err)
+ return toBlobRenewLeaseResponse(resp), handleError(err)
}
// ReleaseLease releases the blob's previously-acquired lease.
@@ -87,7 +92,7 @@ func (blc *BlobLeaseClient) ReleaseLease(ctx context.Context, options *ReleaseLe
if blc.leaseID == nil {
return BlobReleaseLeaseResponse{}, errors.New("leaseID cannot be nil")
}
- renewLeaseBlobOptions, modifiedAccessConditions := options.pointers()
+ renewLeaseBlobOptions, modifiedAccessConditions := options.format()
resp, err := blc.client.ReleaseLease(ctx, *blc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions)
- return resp, handleError(err)
+ return toBlobReleaseLeaseResponse(resp), handleError(err)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go
index 2160e51c665..b080128c815 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -12,17 +15,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)
-const (
- // BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
- BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
-
- // BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
- BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB
-
- // BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
- BlockBlobMaxBlocks = 50000
-)
-
// BlockBlobClient defines a set of operations applicable to block blobs.
type BlockBlobClient struct {
BlobClient
@@ -30,58 +22,92 @@ type BlockBlobClient struct {
}
// NewBlockBlobClient creates a BlockBlobClient object using the specified URL, Azure AD credential, and options.
-func NewBlockBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (BlockBlobClient, error) {
+func NewBlockBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlockBlobClient, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
- con := newConnection(blobURL, authPolicy, options.getConnectionOptions())
- return BlockBlobClient{
- client: &blockBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+ conOptions := getConnectionOptions(options)
+ conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+ conn := newConnection(blobURL, conOptions)
+
+ bClient := newBlobClient(conn.Endpoint(), conn.Pipeline())
+ return &BlockBlobClient{
+ client: newBlockBlobClient(bClient.endpoint, bClient.pl),
+ BlobClient: BlobClient{
+ client: bClient,
+ },
}, nil
}
// NewBlockBlobClientWithNoCredential creates a BlockBlobClient object using the specified URL and options.
-func NewBlockBlobClientWithNoCredential(blobURL string, options *ClientOptions) (BlockBlobClient, error) {
- con := newConnection(blobURL, nil, options.getConnectionOptions())
- return BlockBlobClient{
- client: &blockBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+func NewBlockBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlockBlobClient, error) {
+ conOptions := getConnectionOptions(options)
+ conn := newConnection(blobURL, conOptions)
+
+ bClient := newBlobClient(conn.Endpoint(), conn.Pipeline())
+ return &BlockBlobClient{
+ client: newBlockBlobClient(bClient.endpoint, bClient.pl),
+ BlobClient: BlobClient{
+ client: bClient,
+ },
}, nil
}
// NewBlockBlobClientWithSharedKey creates a BlockBlobClient object using the specified URL, shared key, and options.
-func NewBlockBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (BlockBlobClient, error) {
+func NewBlockBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlockBlobClient, error) {
authPolicy := newSharedKeyCredPolicy(cred)
- con := newConnection(blobURL, authPolicy, options.getConnectionOptions())
- return BlockBlobClient{
- client: &blockBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+ conOptions := getConnectionOptions(options)
+ conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+ conn := newConnection(blobURL, conOptions)
+
+ bClient := newBlobClient(conn.Endpoint(), conn.Pipeline())
+ return &BlockBlobClient{
+ client: newBlockBlobClient(bClient.endpoint, bClient.pl),
+ BlobClient: BlobClient{
+ client: bClient,
+ sharedKey: cred,
+ },
}, nil
}
// WithSnapshot creates a new BlockBlobClient object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
-func (bb BlockBlobClient) WithSnapshot(snapshot string) BlockBlobClient {
- p := NewBlobURLParts(bb.URL())
+func (bb *BlockBlobClient) WithSnapshot(snapshot string) (*BlockBlobClient, error) {
+ p, err := NewBlobURLParts(bb.URL())
+ if err != nil {
+ return nil, err
+ }
+
p.Snapshot = snapshot
- con := &connection{u: p.URL(), p: bb.client.con.p}
- return BlockBlobClient{
- client: &blockBlobClient{
- con: con,
+ endpoint := p.URL()
+ bClient := newBlobClient(endpoint, bb.client.pl)
+
+ return &BlockBlobClient{
+ client: newBlockBlobClient(bClient.endpoint, bClient.pl),
+ BlobClient: BlobClient{
+ client: bClient,
+ sharedKey: bb.sharedKey,
},
- BlobClient: BlobClient{client: &blobClient{con: con}},
- }
+ }, nil
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
-func (bb BlockBlobClient) WithVersionID(versionID string) BlockBlobClient {
- p := NewBlobURLParts(bb.URL())
- p.VersionID = versionID
- con := &connection{u: p.URL(), p: bb.client.con.p}
- return BlockBlobClient{
- client: &blockBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+func (bb *BlockBlobClient) WithVersionID(versionID string) (*BlockBlobClient, error) {
+ p, err := NewBlobURLParts(bb.URL())
+ if err != nil {
+ return nil, err
}
+
+ p.VersionID = versionID
+ endpoint := p.URL()
+ bClient := newBlobClient(endpoint, bb.client.pl)
+
+ return &BlockBlobClient{
+ client: newBlockBlobClient(bClient.endpoint, bClient.pl),
+ BlobClient: BlobClient{
+ client: bClient,
+ sharedKey: bb.sharedKey,
+ },
+ }, nil
}
// Upload creates a new block blob or overwrites an existing block blob.
@@ -91,43 +117,47 @@ func (bb BlockBlobClient) WithVersionID(versionID string) BlockBlobClient {
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
-func (bb BlockBlobClient) Upload(ctx context.Context, body io.ReadSeekCloser, options *UploadBlockBlobOptions) (BlockBlobUploadResponse, error) {
+func (bb *BlockBlobClient) Upload(ctx context.Context, body io.ReadSeekCloser, options *BlockBlobUploadOptions) (BlockBlobUploadResponse, error) {
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return BlockBlobUploadResponse{}, err
}
- basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.pointers()
+ basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format()
resp, err := bb.client.Upload(ctx, count, body, basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions)
- return resp, handleError(err)
+ return toBlockBlobUploadResponse(resp), handleError(err)
}
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
-func (bb BlockBlobClient) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser, options *StageBlockOptions) (BlockBlobStageBlockResponse, error) {
+func (bb *BlockBlobClient) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser,
+ options *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error) {
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return BlockBlobStageBlockResponse{}, err
}
- ac, stageBlockOptions, cpkInfo, cpkScopeInfo := options.pointers()
- resp, err := bb.client.StageBlock(ctx, base64BlockID, count, body, stageBlockOptions, ac, cpkInfo, cpkScopeInfo)
+ stageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format()
+ resp, err := bb.client.StageBlock(ctx, base64BlockID, count, body, stageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo)
- return resp, handleError(err)
+ return toBlockBlobStageBlockResponse(resp), handleError(err)
}
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
-func (bb BlockBlobClient) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string, contentLength int64, options *StageBlockFromURLOptions) (BlockBlobStageBlockFromURLResponse, error) {
- ac, smac, stageOptions, cpkInfo, cpkScope := options.pointers()
+func (bb *BlockBlobClient) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string,
+ contentLength int64, options *BlockBlobStageBlockFromURLOptions) (BlockBlobStageBlockFromURLResponse, error) {
- resp, err := bb.client.StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, stageOptions, cpkInfo, cpkScope, ac, smac)
+ stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format()
- return resp, handleError(err)
+ resp, err := bb.client.StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, stageBlockFromURLOptions,
+ cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
+
+ return toBlockBlobStageBlockFromURLResponse(resp), handleError(err)
}
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
@@ -136,42 +166,36 @@ func (bb BlockBlobClient) StageBlockFromURL(ctx context.Context, base64BlockID s
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list and permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
-func (bb BlockBlobClient) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *CommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) {
- commitOptions, headers, cpkInfo, cpkScope, modifiedAccess, leaseAccess := options.pointers()
-
+func (bb *BlockBlobClient) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) {
// this is a code smell in the generated code
blockIds := make([]*string, len(base64BlockIDs))
for k, v := range base64BlockIDs {
- blockIds[k] = to.StringPtr(v)
+ blockIds[k] = to.Ptr(v)
}
- resp, err := bb.client.CommitBlockList(ctx, BlockLookupList{
- Latest: blockIds,
- }, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess)
+ blockLookupList := BlockLookupList{Latest: blockIds}
+ commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess := options.format()
+
+ resp, err := bb.client.CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess)
- return resp, handleError(err)
+ return toBlockBlobCommitBlockListResponse(resp), handleError(err)
}
// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
-func (bb BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *GetBlockListOptions) (BlockBlobGetBlockListResponse, error) {
- o, mac, lac := options.pointers()
+func (bb *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobGetBlockListOptions) (BlockBlobGetBlockListResponse, error) {
+ o, lac, mac := options.format()
resp, err := bb.client.GetBlockList(ctx, listType, o, lac, mac)
- return resp, handleError(err)
+ return toBlockBlobGetBlockListResponse(resp), handleError(err)
}
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
-func (bb BlockBlobClient) CopyFromURL(ctx context.Context, source string, options *CopyBlockBlobFromURLOptions) (BlobCopyFromURLResponse, error) {
- copyOptions, smac, mac, lac := options.pointers()
-
- bClient := blobClient{
- con: bb.client.con,
- }
-
- resp, err := bClient.CopyFromURL(ctx, source, copyOptions, smac, mac, lac)
+func (bb *BlockBlobClient) CopyFromURL(ctx context.Context, source string, options *BlockBlobCopyFromURLOptions) (BlockBlobCopyFromURLResponse, error) {
+ copyOptions, smac, mac, lac := options.format()
+ resp, err := bb.BlobClient.client.CopyFromURL(ctx, source, copyOptions, smac, mac, lac)
- return resp, handleError(err)
+ return toBlockBlobCopyFromURLResponse(resp), handleError(err)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go
index f088fb5d459..2c23b8f4ed8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go
index 16c3e1a006d..12c4a18dfd1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -5,55 +8,67 @@ package azblob
import (
"context"
+ "errors"
"time"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
-// A ContainerClient represents a URL to the Azure Storage container allowing you to manipulate its blobs.
+// ContainerClient represents a URL to the Azure Storage container allowing you to manipulate its blobs.
type ContainerClient struct {
client *containerClient
sharedKey *SharedKeyCredential
}
// URL returns the URL endpoint used by the ContainerClient object.
-func (c ContainerClient) URL() string {
- return c.client.con.u
+func (c *ContainerClient) URL() string {
+ return c.client.endpoint
}
// NewContainerClient creates a ContainerClient object using the specified URL, Azure AD credential, and options.
-func NewContainerClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (ContainerClient, error) {
+func NewContainerClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*ContainerClient, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
- return ContainerClient{client: &containerClient{
- con: newConnection(containerURL, authPolicy, options.getConnectionOptions()),
- }}, nil
+ conOptions := getConnectionOptions(options)
+ conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+ conn := newConnection(containerURL, conOptions)
+
+ return &ContainerClient{
+ client: newContainerClient(conn.Endpoint(), conn.Pipeline()),
+ }, nil
}
// NewContainerClientWithNoCredential creates a ContainerClient object using the specified URL and options.
-func NewContainerClientWithNoCredential(containerURL string, options *ClientOptions) (ContainerClient, error) {
- return ContainerClient{client: &containerClient{
- con: newConnection(containerURL, nil, options.getConnectionOptions()),
- }}, nil
+func NewContainerClientWithNoCredential(containerURL string, options *ClientOptions) (*ContainerClient, error) {
+ conOptions := getConnectionOptions(options)
+ conn := newConnection(containerURL, conOptions)
+
+ return &ContainerClient{
+ client: newContainerClient(conn.Endpoint(), conn.Pipeline()),
+ }, nil
}
// NewContainerClientWithSharedKey creates a ContainerClient object using the specified URL, shared key, and options.
-func NewContainerClientWithSharedKey(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (ContainerClient, error) {
+func NewContainerClientWithSharedKey(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*ContainerClient, error) {
authPolicy := newSharedKeyCredPolicy(cred)
- return ContainerClient{client: &containerClient{
- con: newConnection(containerURL, authPolicy, options.getConnectionOptions()),
- }, sharedKey: cred}, nil
+ conOptions := getConnectionOptions(options)
+ conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+ conn := newConnection(containerURL, conOptions)
+
+ return &ContainerClient{
+ client: newContainerClient(conn.Endpoint(), conn.Pipeline()),
+ sharedKey: cred,
+ }, nil
}
// NewContainerClientFromConnectionString creates a ContainerClient object using connection string of an account
-func NewContainerClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (ContainerClient, error) {
+func NewContainerClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*ContainerClient, error) {
svcClient, err := NewServiceClientFromConnectionString(connectionString, options)
if err != nil {
- return ContainerClient{}, err
+ return nil, err
}
- return svcClient.NewContainerClient(containerName), nil
+ return svcClient.NewContainerClient(containerName)
}
// NewBlobClient creates a new BlobClient object by concatenating blobName to the end of
@@ -61,13 +76,13 @@ func NewContainerClientFromConnectionString(connectionString string, containerNa
// To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlobClient instead of calling this object's
// NewBlobClient method.
-func (c ContainerClient) NewBlobClient(blobName string) BlobClient {
+func (c *ContainerClient) NewBlobClient(blobName string) (*BlobClient, error) {
blobURL := appendToURLPath(c.URL(), blobName)
- newCon := &connection{u: blobURL, p: c.client.con.p}
- return BlobClient{
- client: &blobClient{newCon, nil},
- }
+ return &BlobClient{
+ client: newBlobClient(blobURL, c.client.pl),
+ sharedKey: c.sharedKey,
+ }, nil
}
// NewAppendBlobClient creates a new AppendBlobURL object by concatenating blobName to the end of
@@ -75,14 +90,16 @@ func (c ContainerClient) NewBlobClient(blobName string) BlobClient {
// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewAppendBlobClient instead of calling this object's
// NewAppendBlobClient method.
-func (c ContainerClient) NewAppendBlobClient(blobName string) AppendBlobClient {
+func (c *ContainerClient) NewAppendBlobClient(blobName string) (*AppendBlobClient, error) {
blobURL := appendToURLPath(c.URL(), blobName)
- newCon := &connection{blobURL, c.client.con.p}
- return AppendBlobClient{
- client: &appendBlobClient{newCon},
- BlobClient: BlobClient{client: &blobClient{con: newCon}},
- }
+ return &AppendBlobClient{
+ BlobClient: BlobClient{
+ client: newBlobClient(blobURL, c.client.pl),
+ sharedKey: c.sharedKey,
+ },
+ client: newAppendBlobClient(blobURL, c.client.pl),
+ }, nil
}
// NewBlockBlobClient creates a new BlockBlobClient object by concatenating blobName to the end of
@@ -90,113 +107,105 @@ func (c ContainerClient) NewAppendBlobClient(blobName string) AppendBlobClient {
// To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlockBlobClient instead of calling this object's
// NewBlockBlobClient method.
-func (c ContainerClient) NewBlockBlobClient(blobName string) BlockBlobClient {
+func (c *ContainerClient) NewBlockBlobClient(blobName string) (*BlockBlobClient, error) {
blobURL := appendToURLPath(c.URL(), blobName)
- newCon := &connection{blobURL, c.client.con.p}
- return BlockBlobClient{
- client: &blockBlobClient{newCon},
- BlobClient: BlobClient{client: &blobClient{con: newCon}},
- }
+ return &BlockBlobClient{
+ BlobClient: BlobClient{
+ client: newBlobClient(blobURL, c.client.pl),
+ sharedKey: c.sharedKey,
+ },
+ client: newBlockBlobClient(blobURL, c.client.pl),
+ }, nil
}
// NewPageBlobClient creates a new PageBlobURL object by concatenating blobName to the end of ContainerClient's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerClient.
// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewPageBlobClient instead of calling this object's
// NewPageBlobClient method.
-func (c ContainerClient) NewPageBlobClient(blobName string) PageBlobClient {
+func (c *ContainerClient) NewPageBlobClient(blobName string) (*PageBlobClient, error) {
blobURL := appendToURLPath(c.URL(), blobName)
- newCon := &connection{blobURL, c.client.con.p}
- return PageBlobClient{
- client: &pageBlobClient{newCon},
- BlobClient: BlobClient{client: &blobClient{con: newCon}},
- }
+ return &PageBlobClient{
+ BlobClient: BlobClient{
+ client: newBlobClient(blobURL, c.client.pl),
+ sharedKey: c.sharedKey,
+ },
+ client: newPageBlobClient(blobURL, c.client.pl),
+ }, nil
}
// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
-func (c ContainerClient) Create(ctx context.Context, options *CreateContainerOptions) (ContainerCreateResponse, error) {
- basics, cpkInfo := options.pointers()
+func (c *ContainerClient) Create(ctx context.Context, options *ContainerCreateOptions) (ContainerCreateResponse, error) {
+ basics, cpkInfo := options.format()
resp, err := c.client.Create(ctx, basics, cpkInfo)
- return resp, handleError(err)
+ return toContainerCreateResponse(resp), handleError(err)
}
// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
-func (c ContainerClient) Delete(ctx context.Context, options *DeleteContainerOptions) (ContainerDeleteResponse, error) {
- basics, leaseInfo, accessConditions := options.pointers()
+func (c *ContainerClient) Delete(ctx context.Context, o *ContainerDeleteOptions) (ContainerDeleteResponse, error) {
+ basics, leaseInfo, accessConditions := o.format()
resp, err := c.client.Delete(ctx, basics, leaseInfo, accessConditions)
- return resp, handleError(err)
+ return toContainerDeleteResponse(resp), handleError(err)
}
// GetProperties returns the container's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
-func (c ContainerClient) GetProperties(ctx context.Context, gpo *GetPropertiesOptionsContainer) (ContainerGetPropertiesResponse, error) {
+func (c *ContainerClient) GetProperties(ctx context.Context, o *ContainerGetPropertiesOptions) (ContainerGetPropertiesResponse, error) {
// NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties.
// This allows us to not expose a GetProperties method at all simplifying the API.
// The optionals are nil, like they were in track 1.5
- options, leaseAccess := gpo.pointers()
-
+ options, leaseAccess := o.format()
resp, err := c.client.GetProperties(ctx, options, leaseAccess)
- return resp, handleError(err)
+ return toContainerGetPropertiesResponse(resp), handleError(err)
}
// SetMetadata sets the container's metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
-func (c ContainerClient) SetMetadata(ctx context.Context, options *SetMetadataContainerOptions) (ContainerSetMetadataResponse, error) {
- metadataOptions, lac, mac := options.pointers()
-
+func (c *ContainerClient) SetMetadata(ctx context.Context, o *ContainerSetMetadataOptions) (ContainerSetMetadataResponse, error) {
+ metadataOptions, lac, mac := o.format()
resp, err := c.client.SetMetadata(ctx, metadataOptions, lac, mac)
- return resp, handleError(err)
+ return toContainerSetMetadataResponse(resp), handleError(err)
}
// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
-func (c ContainerClient) GetAccessPolicy(ctx context.Context, options *GetAccessPolicyOptions) (ContainerGetAccessPolicyResponse, error) {
- o, ac := options.pointers()
-
- resp, err := c.client.GetAccessPolicy(ctx, o, ac)
+func (c *ContainerClient) GetAccessPolicy(ctx context.Context, o *ContainerGetAccessPolicyOptions) (ContainerGetAccessPolicyResponse, error) {
+ options, ac := o.format()
+ resp, err := c.client.GetAccessPolicy(ctx, options, ac)
- return resp, handleError(err)
+ return toContainerGetAccessPolicyResponse(resp), handleError(err)
}
// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
-func (c ContainerClient) SetAccessPolicy(ctx context.Context, options *SetAccessPolicyOptions) (ContainerSetAccessPolicyResponse, error) {
- accessPolicy, mac, lac := options.pointers()
+func (c *ContainerClient) SetAccessPolicy(ctx context.Context, o *ContainerSetAccessPolicyOptions) (ContainerSetAccessPolicyResponse, error) {
+ accessPolicy, mac, lac := o.format()
+ resp, err := c.client.SetAccessPolicy(ctx, accessPolicy, mac, lac)
- resp, err := c.client.SetAccessPolicy(ctx, &accessPolicy, mac, lac)
-
- return resp, handleError(err)
+ return toContainerSetAccessPolicyResponse(resp), handleError(err)
}
// ListBlobsFlat returns a pager for blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
-func (c ContainerClient) ListBlobsFlat(listOptions *ContainerListBlobFlatSegmentOptions) *ContainerListBlobFlatSegmentPager {
+func (c *ContainerClient) ListBlobsFlat(o *ContainerListBlobsFlatOptions) *ContainerListBlobFlatPager {
+ listOptions := o.format()
pager := c.client.ListBlobFlatSegment(listOptions)
- // override the generated pager to insert our handleError(error)
- if pager.Err() != nil {
- return pager
- }
// override the advancer
- pager.advancer = func(ctx context.Context, response ContainerListBlobFlatSegmentResponse) (*policy.Request, error) {
+ pager.advancer = func(ctx context.Context, response containerClientListBlobFlatSegmentResponse) (*policy.Request, error) {
listOptions.Marker = response.NextMarker
return c.client.listBlobFlatSegmentCreateRequest(ctx, listOptions)
}
- // TODO: Come Here
- //pager.err = func(response *azcore.Response) error {
- // return handleError(c.client.listBlobFlatSegmentHandleError(response))
- //}
-
- return pager
+ return toContainerListBlobFlatSegmentPager(pager)
}
// ListBlobsHierarchy returns a channel of blobs starting from the specified Marker. Use an empty
@@ -207,40 +216,38 @@ func (c ContainerClient) ListBlobsFlat(listOptions *ContainerListBlobFlatSegment
// AutoPagerTimeout specifies the amount of time with no read operations before the channel times out and closes. Specify no time and it will be ignored.
// AutoPagerBufferSize specifies the channel's buffer size.
// Both the blob item channel and error channel should be watched. Only one error will be released via this channel (or a nil error, to register a clean exit.)
-func (c ContainerClient) ListBlobsHierarchy(delimiter string, listOptions *ContainerListBlobHierarchySegmentOptions) *ContainerListBlobHierarchySegmentPager {
+func (c *ContainerClient) ListBlobsHierarchy(delimiter string, o *ContainerListBlobsHierarchyOptions) *ContainerListBlobHierarchyPager {
+ listOptions := o.format()
pager := c.client.ListBlobHierarchySegment(delimiter, listOptions)
- // override the generated pager to insert our handleError(error)
- if pager.Err() != nil {
- return pager
- }
// override the advancer
- pager.advancer = func(ctx context.Context, response ContainerListBlobHierarchySegmentResponse) (*policy.Request, error) {
+ pager.advancer = func(ctx context.Context, response containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) {
listOptions.Marker = response.NextMarker
return c.client.listBlobHierarchySegmentCreateRequest(ctx, delimiter, listOptions)
}
- // todo: come here
- //p.errorer = func(response *azcore.Response) error {
- // return handleError(c.client.listBlobHierarchySegmentHandleError(response))
- //}
-
- return pager
+ return toContainerListBlobHierarchySegmentPager(pager)
}
-// GetSASToken is a convenience method for generating a SAS token for the currently pointed at container.
+// GetSASURL is a convenience method for generating a SAS token for the currently pointed at container.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
-func (c ContainerClient) GetSASToken(permissions ContainerSASPermissions, start time.Time, expiry time.Time) (SASQueryParameters, error) {
- urlParts := NewBlobURLParts(c.URL())
+func (c *ContainerClient) GetSASURL(permissions ContainerSASPermissions, start time.Time, expiry time.Time) (string, error) {
+ if c.sharedKey == nil {
+ return "", errors.New("SAS can only be signed with a SharedKeyCredential")
+ }
- // Containers do not have snapshots, nor versions.
+ urlParts, err := NewBlobURLParts(c.URL())
+ if err != nil {
+ return "", err
+ }
- return BlobSASSignatureValues{
+ // Containers do not have snapshots, nor versions.
+ urlParts.SAS, err = BlobSASSignatureValues{
ContainerName: urlParts.ContainerName,
-
- Permissions: permissions.String(),
-
- StartTime: start.UTC(),
- ExpiryTime: expiry.UTC(),
+ Permissions: permissions.String(),
+ StartTime: start.UTC(),
+ ExpiryTime: expiry.UTC(),
}.NewSASQueryParameters(c.sharedKey)
+
+ return urlParts.URL(), err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go
index 716d5e84c97..395a72a89aa 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -11,82 +14,89 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
+//ContainerLeaseClient represents lease client of container
type ContainerLeaseClient struct {
ContainerClient
leaseID *string
}
-func (c ContainerClient) NewContainerLeaseClient(leaseID *string) (ContainerLeaseClient, error) {
+// NewContainerLeaseClient is constructor of ContainerLeaseClient
+func (c *ContainerClient) NewContainerLeaseClient(leaseID *string) (*ContainerLeaseClient, error) {
if leaseID == nil {
generatedUuid, err := uuid.New()
if err != nil {
- return ContainerLeaseClient{}, err
+ return nil, err
}
- leaseID = to.StringPtr(generatedUuid.String())
+ leaseID = to.Ptr(generatedUuid.String())
}
- return ContainerLeaseClient{
- ContainerClient: c,
+ return &ContainerLeaseClient{
+ ContainerClient: *c,
leaseID: leaseID,
}, nil
}
// AcquireLease acquires a lease on the container for delete operations. The lease Duration must be between 15 to 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
-func (clc *ContainerLeaseClient) AcquireLease(ctx context.Context, options *AcquireLeaseContainerOptions) (ContainerAcquireLeaseResponse, error) {
- containerAcquireLeaseOptions, modifiedAccessConditions := options.pointers()
+func (clc *ContainerLeaseClient) AcquireLease(ctx context.Context, options *ContainerAcquireLeaseOptions) (ContainerAcquireLeaseResponse, error) {
+ containerAcquireLeaseOptions, modifiedAccessConditions := options.format()
containerAcquireLeaseOptions.ProposedLeaseID = clc.leaseID
- resp, err := clc.client.AcquireLease(ctx, containerAcquireLeaseOptions, modifiedAccessConditions)
+ resp, err := clc.client.AcquireLease(ctx, &containerAcquireLeaseOptions, modifiedAccessConditions)
if err == nil && resp.LeaseID != nil {
clc.leaseID = resp.LeaseID
}
- return resp, handleError(err)
+ return toContainerAcquireLeaseResponse(resp), handleError(err)
}
// BreakLease breaks the container's previously-acquired lease (if it exists).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
-func (clc *ContainerLeaseClient) BreakLease(ctx context.Context, options *BreakLeaseContainerOptions) (ContainerBreakLeaseResponse, error) {
- containerBreakLeaseOptions, modifiedAccessConditions := options.pointers()
+func (clc *ContainerLeaseClient) BreakLease(ctx context.Context, options *ContainerBreakLeaseOptions) (ContainerBreakLeaseResponse, error) {
+ containerBreakLeaseOptions, modifiedAccessConditions := options.format()
resp, err := clc.client.BreakLease(ctx, containerBreakLeaseOptions, modifiedAccessConditions)
- return resp, handleError(err)
+ return toContainerBreakLeaseResponse(resp), handleError(err)
}
// ChangeLease changes the container's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
-func (clc *ContainerLeaseClient) ChangeLease(ctx context.Context, options *ChangeLeaseContainerOptions) (ContainerChangeLeaseResponse, error) {
+func (clc *ContainerLeaseClient) ChangeLease(ctx context.Context, options *ContainerChangeLeaseOptions) (ContainerChangeLeaseResponse, error) {
if clc.leaseID == nil {
return ContainerChangeLeaseResponse{}, errors.New("leaseID cannot be nil")
}
- proposedLeaseID, modifiedAccessConditions, err := options.pointers()
+
+ proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format()
if err != nil {
return ContainerChangeLeaseResponse{}, err
}
- resp, err := clc.client.ChangeLease(ctx, *clc.leaseID, *proposedLeaseID, nil, modifiedAccessConditions)
+ resp, err := clc.client.ChangeLease(ctx, *clc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions)
if err == nil && resp.LeaseID != nil {
clc.leaseID = resp.LeaseID
}
- return resp, handleError(err)
+ return toContainerChangeLeaseResponse(resp), handleError(err)
}
// ReleaseLease releases the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
-func (clc *ContainerLeaseClient) ReleaseLease(ctx context.Context, options *ReleaseLeaseContainerOptions) (ContainerReleaseLeaseResponse, error) {
- containerReleaseLeaseOptions, modifiedAccessConditions := options.pointers()
+func (clc *ContainerLeaseClient) ReleaseLease(ctx context.Context, options *ContainerReleaseLeaseOptions) (ContainerReleaseLeaseResponse, error) {
+ if clc.leaseID == nil {
+ return ContainerReleaseLeaseResponse{}, errors.New("leaseID cannot be nil")
+ }
+ containerReleaseLeaseOptions, modifiedAccessConditions := options.format()
resp, err := clc.client.ReleaseLease(ctx, *clc.leaseID, containerReleaseLeaseOptions, modifiedAccessConditions)
- return resp, handleError(err)
+
+ return toContainerReleaseLeaseResponse(resp), handleError(err)
}
// RenewLease renews the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
-func (clc *ContainerLeaseClient) RenewLease(ctx context.Context, options *RenewLeaseContainerOptions) (ContainerRenewLeaseResponse, error) {
+func (clc *ContainerLeaseClient) RenewLease(ctx context.Context, options *ContainerRenewLeaseOptions) (ContainerRenewLeaseResponse, error) {
if clc.leaseID == nil {
return ContainerRenewLeaseResponse{}, errors.New("leaseID cannot be nil")
}
- renewLeaseBlobOptions, modifiedAccessConditions := options.pointers()
+ renewLeaseBlobOptions, modifiedAccessConditions := options.format()
resp, err := clc.client.RenewLease(ctx, *clc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions)
if err == nil && resp.LeaseID != nil {
clc.leaseID = resp.LeaseID
}
- return resp, handleError(err)
+ return toContainerRenewLeaseResponse(resp), handleError(err)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go
index db79f887f58..507993b9e5d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -5,6 +8,7 @@ package azblob
import (
"context"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"io"
"net/url"
@@ -12,11 +16,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
-const (
- // PageBlobPageBytes indicates the number of bytes in a page (512).
- PageBlobPageBytes = 512
-)
-
+// PageBlobClient represents a client to an Azure Storage page blob;
type PageBlobClient struct {
BlobClient
client *pageBlobClient
@@ -24,88 +24,119 @@ type PageBlobClient struct {
// NewPageBlobClient creates a ServiceClient object using the specified URL, Azure AD credential, and options.
// Example of serviceURL: https://.blob.core.windows.net
-func NewPageBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (PageBlobClient, error) {
+func NewPageBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*PageBlobClient, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
- con := newConnection(blobURL, authPolicy, options.getConnectionOptions())
- return PageBlobClient{
- client: &pageBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+ conOptions := getConnectionOptions(options)
+ conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+ conn := newConnection(blobURL, conOptions)
+
+ return &PageBlobClient{
+ client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()),
+ BlobClient: BlobClient{
+ client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
+ },
}, nil
}
// NewPageBlobClientWithNoCredential creates a ServiceClient object using the specified URL and options.
// Example of serviceURL: https://.blob.core.windows.net?
-func NewPageBlobClientWithNoCredential(blobURL string, options *ClientOptions) (PageBlobClient, error) {
- con := newConnection(blobURL, nil, options.getConnectionOptions())
- return PageBlobClient{
- client: &pageBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+func NewPageBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*PageBlobClient, error) {
+ conOptions := getConnectionOptions(options)
+ conn := newConnection(blobURL, conOptions)
+
+ return &PageBlobClient{
+ client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()),
+ BlobClient: BlobClient{
+ client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
+ },
}, nil
}
// NewPageBlobClientWithSharedKey creates a ServiceClient object using the specified URL, shared key, and options.
// Example of serviceURL: https://.blob.core.windows.net
-func NewPageBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (PageBlobClient, error) {
+func NewPageBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*PageBlobClient, error) {
authPolicy := newSharedKeyCredPolicy(cred)
- con := newConnection(blobURL, authPolicy, options.getConnectionOptions())
- return PageBlobClient{
- client: &pageBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+ conOptions := getConnectionOptions(options)
+ conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+ conn := newConnection(blobURL, conOptions)
+
+ return &PageBlobClient{
+ client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()),
+ BlobClient: BlobClient{
+ client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
+ sharedKey: cred,
+ },
}, nil
}
// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
-func (pb PageBlobClient) WithSnapshot(snapshot string) PageBlobClient {
- p := NewBlobURLParts(pb.URL())
+func (pb *PageBlobClient) WithSnapshot(snapshot string) (*PageBlobClient, error) {
+ p, err := NewBlobURLParts(pb.URL())
+ if err != nil {
+ return nil, err
+ }
p.Snapshot = snapshot
- con := &connection{p.URL(), pb.client.con.p}
- return PageBlobClient{
- client: &pageBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
- }
+ endpoint := p.URL()
+ pipeline := pb.client.pl
+ return &PageBlobClient{
+ client: newPageBlobClient(endpoint, pipeline),
+ BlobClient: BlobClient{
+ client: newBlobClient(endpoint, pipeline),
+ sharedKey: pb.sharedKey,
+ },
+ }, nil
}
// WithVersionID creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the version returning a URL to the base blob.
-func (pb PageBlobClient) WithVersionID(versionID string) PageBlobClient {
- p := NewBlobURLParts(pb.URL())
- p.VersionID = versionID
-
- con := &connection{p.URL(), pb.client.con.p}
- return PageBlobClient{
- client: &pageBlobClient{con: con},
- BlobClient: BlobClient{client: &blobClient{con: con}},
+func (pb *PageBlobClient) WithVersionID(versionID string) (*PageBlobClient, error) {
+ p, err := NewBlobURLParts(pb.URL())
+ if err != nil {
+ return nil, err
}
+
+ p.VersionID = versionID
+ endpoint := p.URL()
+
+ pipeline := pb.client.pl
+ return &PageBlobClient{
+ client: newPageBlobClient(endpoint, pipeline),
+ BlobClient: BlobClient{
+ client: newBlobClient(endpoint, pipeline),
+ sharedKey: pb.sharedKey,
+ },
+ }, nil
}
// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
-func (pb PageBlobClient) Create(ctx context.Context, size int64, options *CreatePageBlobOptions) (PageBlobCreateResponse, error) {
- creationOptions, httpHeaders, cpkInfo, cpkScope, lac, mac := options.pointers()
+func (pb *PageBlobClient) Create(ctx context.Context, size int64, o *PageBlobCreateOptions) (PageBlobCreateResponse, error) {
+ createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format()
- resp, err := pb.client.Create(ctx, 0, size, creationOptions, httpHeaders, lac, cpkInfo, cpkScope, mac)
+ resp, err := pb.client.Create(ctx, 0, size, createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
- return resp, handleError(err)
+ return toPageBlobCreateResponse(resp), handleError(err)
}
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
-func (pb PageBlobClient) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *UploadPagesOptions) (PageBlobUploadPagesResponse, error) {
+func (pb *PageBlobClient) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *PageBlobUploadPagesOptions) (PageBlobUploadPagesResponse, error) {
count, err := validateSeekableStreamAt0AndGetCount(body)
if err != nil {
return PageBlobUploadPagesResponse{}, err
}
- uploadOptions, cpkInfo, cpkScope, snac, lac, mac := options.pointers()
+ uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format()
- resp, err := pb.client.UploadPages(ctx, count, body, uploadOptions, lac, cpkInfo, cpkScope, snac, mac)
+ resp, err := pb.client.UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions,
+ cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
- return resp, handleError(err)
+ return toPageBlobUploadPagesResponse(resp), handleError(err)
}
// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
@@ -113,89 +144,99 @@ func (pb PageBlobClient) UploadPages(ctx context.Context, body io.ReadSeekCloser
// The destOffset specifies the start offset of data in page blob will be written to.
// The count must be a multiple of 512 bytes.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
-func (pb PageBlobClient) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64, options *UploadPagesFromURLOptions) (PageBlobUploadPagesFromURLResponse, error) {
- uploadOptions, cpkInfo, cpkScope, snac, smac, lac, mac := options.pointers()
+func (pb *PageBlobClient) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64,
+ options *PageBlobUploadPagesFromURLOptions) (PageBlobUploadPagesFromURLResponse, error) {
+
+ uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := options.format()
- resp, err := pb.client.UploadPagesFromURL(ctx, source, rangeToString(sourceOffset, count), 0, rangeToString(destOffset, count), uploadOptions, cpkInfo, cpkScope, lac, snac, mac, smac)
+ resp, err := pb.client.UploadPagesFromURL(ctx, source, rangeToString(sourceOffset, count), 0,
+ rangeToString(destOffset, count), uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions,
+ sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
- return resp, handleError(err)
+ return toPageBlobUploadPagesFromURLResponse(resp), handleError(err)
}
// ClearPages frees the specified pages from the page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
-func (pb PageBlobClient) ClearPages(ctx context.Context, pageRange HttpRange, options *ClearPagesOptions) (PageBlobClearPagesResponse, error) {
- clearOptions := &PageBlobClearPagesOptions{
- Range: pageRange.pointers(),
+func (pb *PageBlobClient) ClearPages(ctx context.Context, pageRange HttpRange, options *PageBlobClearPagesOptions) (PageBlobClearPagesResponse, error) {
+ clearOptions := &pageBlobClientClearPagesOptions{
+ Range: pageRange.format(),
}
- cpkInfo, cpkScope, snac, lac, mac := options.pointers()
+ leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format()
- resp, err := pb.client.ClearPages(ctx, 0, clearOptions, lac, cpkInfo, cpkScope, snac, mac)
+ resp, err := pb.client.ClearPages(ctx, 0, clearOptions, leaseAccessConditions, cpkInfo,
+ cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
- return resp, handleError(err)
+ return toPageBlobClearPagesResponse(resp), handleError(err)
}
// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
-func (pb PageBlobClient) GetPageRanges(ctx context.Context, pageRange HttpRange, options *GetPageRangesOptions) (PageBlobGetPageRangesResponse, error) {
- snapshot, lac, mac := options.pointers()
-
- getRangesOptions := &PageBlobGetPageRangesOptions{
- Range: pageRange.pointers(),
- Snapshot: snapshot,
+func (pb *PageBlobClient) GetPageRanges(options *PageBlobGetPageRangesOptions) *PageBlobGetPageRangesPager {
+ getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions := options.format()
+
+ pageBlobGetPageRangesPager := pb.client.GetPageRanges(getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions)
+
+ // Fixing Advancer
+ pageBlobGetPageRangesPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesResponse) (*policy.Request, error) {
+ getPageRangesOptions.Marker = response.NextMarker
+ req, err := pb.client.getPageRangesCreateRequest(ctx, getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions)
+ if err != nil {
+ return nil, handleError(err)
+ }
+ queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery)
+ if err != nil {
+ return nil, handleError(err)
+ }
+ req.Raw().URL.RawQuery = queryValues.Encode()
+ return req, nil
}
- resp, err := pb.client.GetPageRanges(ctx, getRangesOptions, lac, mac)
-
- return resp, handleError(err)
+ return toPageBlobGetPageRangesPager(pageBlobGetPageRangesPager)
}
-// GetManagedDiskPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob representing managed disk.
-// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
-//func (pb PageBlobURL) GetManagedDiskPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot *string, prevSnapshotURL *string, ac BlobAccessConditions) (*PageList, error) {
-// ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
-//
-// return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, prevSnapshot,
-// prevSnapshotURL, // Get managed disk diff
-// HttpRange{offset: offset, count: count}.pointers(),
-// ac.LeaseAccessConditions.pointers(),
-// ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
-// nil, // Blob ifTags
-// nil)
-//}
-
// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
-func (pb PageBlobClient) GetPageRangesDiff(ctx context.Context, pageRange HttpRange, prevSnapshot string, options *GetPageRangesOptions) (PageBlobGetPageRangesDiffResponse, error) {
- snapshot, lac, mac := options.pointers()
-
- diffOptions := &PageBlobGetPageRangesDiffOptions{
- Prevsnapshot: &prevSnapshot,
- Range: pageRange.pointers(),
- Snapshot: snapshot,
+func (pb *PageBlobClient) GetPageRangesDiff(options *PageBlobGetPageRangesDiffOptions) *PageBlobGetPageRangesDiffPager {
+ getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions := options.format()
+
+ getPageRangesDiffPager := pb.client.GetPageRangesDiff(getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions)
+
+ // Fixing Advancer
+ getPageRangesDiffPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) {
+ getPageRangesDiffOptions.Marker = response.NextMarker
+ req, err := pb.client.getPageRangesDiffCreateRequest(ctx, getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions)
+ if err != nil {
+ return nil, handleError(err)
+ }
+ queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery)
+ if err != nil {
+ return nil, handleError(err)
+ }
+ req.Raw().URL.RawQuery = queryValues.Encode()
+ return req, nil
}
- resp, err := pb.client.GetPageRangesDiff(ctx, diffOptions, lac, mac)
-
- return resp, handleError(err)
+ return toPageBlobGetPageRangesDiffPager(getPageRangesDiffPager)
}
// Resize resizes the page blob to the specified size (which must be a multiple of 512).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
-func (pb PageBlobClient) Resize(ctx context.Context, size int64, options *ResizePageBlobOptions) (PageBlobResizeResponse, error) {
- cpkInfo, cpkScope, lac, mac := options.pointers()
+func (pb *PageBlobClient) Resize(ctx context.Context, size int64, options *PageBlobResizeOptions) (PageBlobResizeResponse, error) {
+ resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format()
- resp, err := pb.client.Resize(ctx, size, nil, lac, cpkInfo, cpkScope, mac)
+ resp, err := pb.client.Resize(ctx, size, resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
- return resp, handleError(err)
+ return toPageBlobResizeResponse(resp), handleError(err)
}
// UpdateSequenceNumber sets the page blob's sequence number.
-func (pb PageBlobClient) UpdateSequenceNumber(ctx context.Context, options *UpdateSequenceNumberPageBlob) (PageBlobUpdateSequenceNumberResponse, error) {
- updateOptions, actionType, lac, mac := options.pointers()
+func (pb *PageBlobClient) UpdateSequenceNumber(ctx context.Context, options *PageBlobUpdateSequenceNumberOptions) (PageBlobUpdateSequenceNumberResponse, error) {
+ actionType, updateOptions, lac, mac := options.format()
resp, err := pb.client.UpdateSequenceNumber(ctx, *actionType, updateOptions, lac, mac)
- return resp, handleError(err)
+ return toPageBlobUpdateSequenceNumberResponse(resp), handleError(err)
}
// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
@@ -203,15 +244,18 @@ func (pb PageBlobClient) UpdateSequenceNumber(ctx context.Context, options *Upda
// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
-func (pb PageBlobClient) StartCopyIncremental(ctx context.Context, source string, prevSnapshot string, options *CopyIncrementalPageBlobOptions) (PageBlobCopyIncrementalResponse, error) {
- srcURL, _ := url.Parse(source)
+func (pb *PageBlobClient) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *PageBlobCopyIncrementalOptions) (PageBlobCopyIncrementalResponse, error) {
+ copySourceURL, err := url.Parse(copySource)
+ if err != nil {
+ return PageBlobCopyIncrementalResponse{}, err
+ }
- queryParams := srcURL.Query()
+ queryParams := copySourceURL.Query()
queryParams.Set("snapshot", prevSnapshot)
- srcURL.RawQuery = queryParams.Encode()
+ copySourceURL.RawQuery = queryParams.Encode()
- pageBlobCopyIncrementalOptions, modifiedAccessConditions := options.pointers()
- resp, err := pb.client.CopyIncremental(ctx, srcURL.String(), pageBlobCopyIncrementalOptions, modifiedAccessConditions)
+ pageBlobCopyIncrementalOptions, modifiedAccessConditions := options.format()
+ resp, err := pb.client.CopyIncremental(ctx, copySourceURL.String(), pageBlobCopyIncrementalOptions, modifiedAccessConditions)
- return resp, handleError(err)
+ return toPageBlobCopyIncrementalResponse(resp), handleError(err)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go
index ee9ee530097..062587604e8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -11,10 +14,11 @@ import (
const (
snapshot = "snapshot"
+ versionId = "versionid"
SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
)
-// A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
+// BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type BlobURLParts struct {
@@ -56,8 +60,11 @@ func isIPEndpointStyle(host string) bool {
// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
-func NewBlobURLParts(u string) BlobURLParts {
- uri, _ := url.Parse(u)
+func NewBlobURLParts(u string) (BlobURLParts, error) {
+ uri, err := url.Parse(u)
+ if err != nil {
+ return BlobURLParts{}, err
+ }
up := BlobURLParts{
Scheme: uri.Scheme,
@@ -73,6 +80,7 @@ func NewBlobURLParts(u string) BlobURLParts {
if isIPEndpointStyle(up.Host) {
if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob
up.IPEndpointStyleInfo.AccountName = path
+ path = "" // No ContainerName present in the URL so path should be empty
} else {
up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes
path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names)
@@ -97,9 +105,18 @@ func NewBlobURLParts(u string) BlobURLParts {
// If we recognized the query parameter, remove it from the map
delete(paramsMap, snapshot)
}
+
+ up.VersionID = "" // Assume no versionID
+ if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok {
+ up.VersionID = versionIDs[0]
+ // If we recognized the query parameter, remove it from the map
+ delete(paramsMap, versionId) // delete "versionid" from paramsMap
+ delete(paramsMap, "versionId") // delete "versionId" from paramsMap
+ }
+
up.SAS = newSASQueryParameters(paramsMap, true)
up.UnparsedParams = paramsMap.Encode()
- return up
+ return up, nil
}
type caseInsensitiveValues url.Values // map[string][]string
@@ -135,6 +152,14 @@ func (up BlobURLParts) URL() string {
up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat)
}
+ // Concatenate blob version id query parameter (if it exists)
+ if up.VersionID != "" {
+ if len(rawQuery) > 0 {
+ rawQuery += "&"
+ }
+ rawQuery += versionId + "=" + up.VersionID
+ }
+
// Concatenate blob snapshot query parameter (if it exists)
if up.Snapshot != "" {
if len(rawQuery) > 0 {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go
index 5dc1021bed7..3f987843904 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -5,6 +8,7 @@ package azblob
import "net/http"
+// ResponseError is a wrapper of error passed from service
type ResponseError interface {
Error() string
Unwrap() error
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go
index b4b853031a7..dda993d1c96 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go
@@ -1,14 +1,11 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
-import (
- "context"
- "io"
- "net/http"
-)
-
// GetHTTPHeaders returns the user-modifiable properties for this blob.
func (bgpr BlobGetPropertiesResponse) GetHTTPHeaders() BlobHTTPHeaders {
return BlobHTTPHeaders{
@@ -24,57 +21,15 @@ func (bgpr BlobGetPropertiesResponse) GetHTTPHeaders() BlobHTTPHeaders {
///////////////////////////////////////////////////////////////////////////////
// GetHTTPHeaders returns the user-modifiable properties for this blob.
-func (dr BlobDownloadResponse) GetHTTPHeaders() BlobHTTPHeaders {
+func (r BlobDownloadResponse) GetHTTPHeaders() BlobHTTPHeaders {
return BlobHTTPHeaders{
- BlobContentType: dr.ContentType,
- BlobContentEncoding: dr.ContentEncoding,
- BlobContentLanguage: dr.ContentLanguage,
- BlobContentDisposition: dr.ContentDisposition,
- BlobCacheControl: dr.CacheControl,
- BlobContentMD5: dr.ContentMD5,
+ BlobContentType: r.ContentType,
+ BlobContentEncoding: r.ContentEncoding,
+ BlobContentLanguage: r.ContentLanguage,
+ BlobContentDisposition: r.ContentDisposition,
+ BlobCacheControl: r.CacheControl,
+ BlobContentMD5: r.ContentMD5,
}
}
///////////////////////////////////////////////////////////////////////////////
-
-// DownloadResponse wraps AutoRest generated DownloadResponse and helps to provide info for retry.
-type DownloadResponse struct {
- BlobDownloadResponse
- ctx context.Context
- b BlobClient
- getInfo HTTPGetterInfo
- ObjectReplicationRules []ObjectReplicationPolicy
-}
-
-// Body constructs new RetryReader stream for reading data. If a connection fails
-// while reading, it will make additional requests to reestablish a connection and
-// continue reading. Specifying a RetryReaderOption's with MaxRetryRequests set to 0
-// (the default), returns the original response body and no retries will be performed.
-// Pass in nil for options to accept the default options.
-func (r *DownloadResponse) Body(options *RetryReaderOptions) io.ReadCloser {
- if options == nil {
- options = &RetryReaderOptions{}
- }
- if options.MaxRetryRequests == 0 { // No additional retries
- return r.RawResponse.Body
- }
- return NewRetryReader(r.ctx, r.RawResponse, r.getInfo, *options,
- func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
- accessConditions := &BlobAccessConditions{
- ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: &getInfo.ETag},
- }
- options := DownloadBlobOptions{
- Offset: &getInfo.Offset,
- Count: &getInfo.Count,
- BlobAccessConditions: accessConditions,
- CpkInfo: options.CpkInfo,
- //CpkScopeInfo: o.CpkScopeInfo,
- }
- resp, err := r.b.Download(ctx, &options)
- if err != nil {
- return nil, err
- }
- return resp.RawResponse, err
- },
- )
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go
index 47524cec4f9..3179138f111 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go
index e8b75641a26..b4104def583 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -79,7 +82,7 @@ func (v AccountSASSignatureValues) Sign(sharedKeyCredential *SharedKeyCredential
return p, nil
}
-// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
+// AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
type AccountSASPermissions struct {
Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool
@@ -159,7 +162,7 @@ func (p *AccountSASPermissions) Parse(s string) error {
return nil
}
-// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
+// AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
type AccountSASServices struct {
Blob, Queue, File bool
@@ -199,7 +202,7 @@ func (s *AccountSASServices) Parse(str string) error {
return nil
}
-// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
+// AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
type AccountSASResourceTypes struct {
Service, Container, Object bool
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go
index 1b65ac860ab..7efbec9b8cf 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -76,11 +79,10 @@ func parseSASTimeString(val string) (t time.Time, timeFormat string, err error)
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
-// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
+// SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
// You parse a map of query parameters into its fields by calling NewSASQueryParameters(). You add the components
// to a query parameter map by calling AddToValues().
// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
-//
// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
type SASQueryParameters struct {
// All members are immutable or values so copies of this struct are goroutine-safe.
@@ -116,101 +118,132 @@ type SASQueryParameters struct {
seTimeFormat string
}
+// PreauthorizedAgentObjectId returns preauthorizedAgentObjectId
func (p *SASQueryParameters) PreauthorizedAgentObjectId() string {
return p.preauthorizedAgentObjectId
}
+// AgentObjectId returns agentObjectId
func (p *SASQueryParameters) AgentObjectId() string {
return p.agentObjectId
}
+// SignedCorrelationId returns signedCorrelationId
func (p *SASQueryParameters) SignedCorrelationId() string {
return p.correlationId
}
+// SignedTid returns signedTid
func (p *SASQueryParameters) SignedTid() string {
return p.signedTid
}
+// SignedStart returns signedStart
func (p *SASQueryParameters) SignedStart() time.Time {
return p.signedStart
}
+// SignedExpiry returns signedExpiry
func (p *SASQueryParameters) SignedExpiry() time.Time {
return p.signedExpiry
}
+// SignedService returns signedService
func (p *SASQueryParameters) SignedService() string {
return p.signedService
}
+// SignedVersion returns signedVersion
func (p *SASQueryParameters) SignedVersion() string {
return p.signedVersion
}
+// SnapshotTime returns snapshotTime
func (p *SASQueryParameters) SnapshotTime() time.Time {
return p.snapshotTime
}
+// Version returns version
func (p *SASQueryParameters) Version() string {
return p.version
}
+// Services returns services
func (p *SASQueryParameters) Services() string {
return p.services
}
+
+// ResourceTypes returns resourceTypes
func (p *SASQueryParameters) ResourceTypes() string {
return p.resourceTypes
}
+
+// Protocol returns protocol
func (p *SASQueryParameters) Protocol() SASProtocol {
return p.protocol
}
+
+// StartTime returns startTime
func (p *SASQueryParameters) StartTime() time.Time {
return p.startTime
}
+
+// ExpiryTime returns expiryTime
func (p *SASQueryParameters) ExpiryTime() time.Time {
return p.expiryTime
}
+// IPRange returns ipRange
func (p *SASQueryParameters) IPRange() IPRange {
return p.ipRange
}
+// Identifier returns identifier
func (p *SASQueryParameters) Identifier() string {
return p.identifier
}
+// Resource returns resource
func (p *SASQueryParameters) Resource() string {
return p.resource
}
+
+// Permissions returns permissions
func (p *SASQueryParameters) Permissions() string {
return p.permissions
}
+// Signature returns signature
func (p *SASQueryParameters) Signature() string {
return p.signature
}
+// CacheControl returns cacheControl
func (p *SASQueryParameters) CacheControl() string {
return p.cacheControl
}
+// ContentDisposition returns contentDisposition
func (p *SASQueryParameters) ContentDisposition() string {
return p.contentDisposition
}
+// ContentEncoding returns contentEncoding
func (p *SASQueryParameters) ContentEncoding() string {
return p.contentEncoding
}
+// ContentLanguage returns contentLanguage
func (p *SASQueryParameters) ContentLanguage() string {
return p.contentLanguage
}
+// ContentType returns contentType
func (p *SASQueryParameters) ContentType() string {
return p.contentType
}
+// SignedDirectoryDepth returns signedDirectoryDepth
func (p *SASQueryParameters) SignedDirectoryDepth() string {
return p.signedDirectoryDepth
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go
index 2386124b9fd..488baed8c0c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -194,7 +197,7 @@ func getCanonicalName(account string, containerName string, blobName string, dir
return strings.Join(elements, "")
}
-// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
+// ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob
type ContainerSASPermissions struct {
@@ -276,7 +279,7 @@ func (p *ContainerSASPermissions) Parse(s string) error {
return nil
}
-// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
+// BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type BlobSASPermissions struct {
Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions bool
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go
index afe971b0afb..e75dd10b31e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -24,64 +27,61 @@ const (
ContainerNameLogs = "$logs"
)
-// A ServiceClient represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers.
+// ServiceClient represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers.
type ServiceClient struct {
client *serviceClient
- u url.URL
sharedKey *SharedKeyCredential
}
// URL returns the URL endpoint used by the ServiceClient object.
func (s ServiceClient) URL() string {
- return s.client.con.u
+ return s.client.endpoint
}
// NewServiceClient creates a ServiceClient object using the specified URL, Azure AD credential, and options.
// Example of serviceURL: https://.blob.core.windows.net
-func NewServiceClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (ServiceClient, error) {
- u, err := url.Parse(serviceURL)
- if err != nil {
- return ServiceClient{}, err
- }
-
+func NewServiceClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*ServiceClient, error) {
authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil)
- return ServiceClient{client: &serviceClient{
- con: newConnection(serviceURL, authPolicy, options.getConnectionOptions()),
- }, u: *u}, nil
+ conOptions := getConnectionOptions(options)
+ conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+ conn := newConnection(serviceURL, conOptions)
+
+ return &ServiceClient{
+ client: newServiceClient(conn.Endpoint(), conn.Pipeline()),
+ }, nil
}
// NewServiceClientWithNoCredential creates a ServiceClient object using the specified URL and options.
// Example of serviceURL: https://.blob.core.windows.net?
-func NewServiceClientWithNoCredential(serviceURL string, options *ClientOptions) (ServiceClient, error) {
- u, err := url.Parse(serviceURL)
- if err != nil {
- return ServiceClient{}, err
- }
+func NewServiceClientWithNoCredential(serviceURL string, options *ClientOptions) (*ServiceClient, error) {
+ conOptions := getConnectionOptions(options)
+ conn := newConnection(serviceURL, conOptions)
- return ServiceClient{client: &serviceClient{
- con: newConnection(serviceURL, nil, options.getConnectionOptions()),
- }, u: *u}, nil
+ return &ServiceClient{
+ client: newServiceClient(conn.Endpoint(), conn.Pipeline()),
+ }, nil
}
// NewServiceClientWithSharedKey creates a ServiceClient object using the specified URL, shared key, and options.
// Example of serviceURL: https://.blob.core.windows.net
-func NewServiceClientWithSharedKey(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (ServiceClient, error) {
- u, err := url.Parse(serviceURL)
- if err != nil {
- return ServiceClient{}, err
- }
+func NewServiceClientWithSharedKey(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*ServiceClient, error) {
authPolicy := newSharedKeyCredPolicy(cred)
- return ServiceClient{client: &serviceClient{
- con: newConnection(serviceURL, authPolicy, options.getConnectionOptions()),
- }, u: *u, sharedKey: cred}, nil
+ conOptions := getConnectionOptions(options)
+ conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
+ conn := newConnection(serviceURL, conOptions)
+
+ return &ServiceClient{
+ client: newServiceClient(conn.Endpoint(), conn.Pipeline()),
+ sharedKey: cred,
+ }, nil
}
// NewServiceClientFromConnectionString creates a service client from the given connection string.
//nolint
-func NewServiceClientFromConnectionString(connectionString string, options *ClientOptions) (ServiceClient, error) {
+func NewServiceClientFromConnectionString(connectionString string, options *ClientOptions) (*ServiceClient, error) {
endpoint, credential, err := parseConnectionString(connectionString)
if err != nil {
- return ServiceClient{}, err
+ return nil, err
}
return NewServiceClientWithSharedKey(endpoint, credential, options)
}
@@ -91,23 +91,22 @@ func NewServiceClientFromConnectionString(connectionString string, options *Clie
// To change the pipeline, create the ContainerClient and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewContainerClient instead of calling this object's
// NewContainerClient method.
-func (s ServiceClient) NewContainerClient(containerName string) ContainerClient {
- containerURL := appendToURLPath(s.client.con.u, containerName)
- containerConnection := &connection{containerURL, s.client.con.p}
- return ContainerClient{
- client: &containerClient{
- con: containerConnection,
- },
+func (s *ServiceClient) NewContainerClient(containerName string) (*ContainerClient, error) {
+ containerURL := appendToURLPath(s.client.endpoint, containerName)
+ return &ContainerClient{
+ client: newContainerClient(containerURL, s.client.pl),
sharedKey: s.sharedKey,
- }
+ }, nil
}
// CreateContainer is a lifecycle method to creates a new container under the specified account.
-// If the container with the same name already exists, a ResourceExistsError will
-// be raised. This method returns a client with which to interact with the newly
-// created container.
-func (s ServiceClient) CreateContainer(ctx context.Context, containerName string, options *CreateContainerOptions) (ContainerCreateResponse, error) {
- containerClient := s.NewContainerClient(containerName)
+// If the container with the same name already exists, a ResourceExistsError will be raised.
+// This method returns a client with which to interact with the newly created container.
+func (s *ServiceClient) CreateContainer(ctx context.Context, containerName string, options *ContainerCreateOptions) (ContainerCreateResponse, error) {
+ containerClient, err := s.NewContainerClient(containerName)
+ if err != nil {
+ return ContainerCreateResponse{}, err
+ }
containerCreateResp, err := containerClient.Create(ctx, options)
return containerCreateResp, err
}
@@ -115,8 +114,8 @@ func (s ServiceClient) CreateContainer(ctx context.Context, containerName string
// DeleteContainer is a lifecycle method that marks the specified container for deletion.
// The container and any blobs contained within it are later deleted during garbage collection.
// If the container is not found, a ResourceNotFoundError will be raised.
-func (s ServiceClient) DeleteContainer(ctx context.Context, containerName string, options *DeleteContainerOptions) (ContainerDeleteResponse, error) {
- containerClient := s.NewContainerClient(containerName)
+func (s *ServiceClient) DeleteContainer(ctx context.Context, containerName string, options *ContainerDeleteOptions) (ContainerDeleteResponse, error) {
+ containerClient, _ := s.NewContainerClient(containerName)
containerDeleteResp, err := containerClient.Delete(ctx, options)
return containerDeleteResp, err
}
@@ -143,55 +142,59 @@ func appendToURLPath(u string, name string) string {
return uri.String()
}
-func (s ServiceClient) GetAccountInfo(ctx context.Context) (ServiceGetAccountInfoResponse, error) {
- resp, err := s.client.GetAccountInfo(ctx, nil)
-
- return resp, handleError(err)
+// GetAccountInfo provides account level information
+func (s *ServiceClient) GetAccountInfo(ctx context.Context, o *ServiceGetAccountInfoOptions) (ServiceGetAccountInfoResponse, error) {
+ getAccountInfoOptions := o.format()
+ resp, err := s.client.GetAccountInfo(ctx, getAccountInfoOptions)
+ return toServiceGetAccountInfoResponse(resp), handleError(err)
}
-// The ListContainers operation returns a pager of the containers under the specified account.
+// ListContainers operation returns a pager of the containers under the specified account.
// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2.
-func (s ServiceClient) ListContainers(o *ListContainersOptions) *ServiceListContainersSegmentPager {
- listOptions := o.pointers()
+func (s *ServiceClient) ListContainers(o *ListContainersOptions) *ServiceListContainersSegmentPager {
+ listOptions := o.format()
pager := s.client.ListContainersSegment(listOptions)
- // override the generated advancer, which is incorrect
- if pager.Err() != nil {
- return pager
- }
+ //TODO: .Err()?
+ //// override the generated advancer, which is incorrect
+ //if pager.Err() != nil {
+ // return pager
+ //}
- pager.advancer = func(cxt context.Context, response ServiceListContainersSegmentResponse) (*policy.Request, error) {
+ pager.advancer = func(ctx context.Context, response serviceClientListContainersSegmentResponse) (*policy.Request, error) {
if response.ListContainersSegmentResponse.NextMarker == nil {
return nil, handleError(errors.New("unexpected missing NextMarker"))
}
- req, err := s.client.listContainersSegmentCreateRequest(cxt, listOptions)
+ req, err := s.client.listContainersSegmentCreateRequest(ctx, listOptions)
if err != nil {
return nil, handleError(err)
}
queryValues, _ := url.ParseQuery(req.Raw().URL.RawQuery)
- queryValues.Set("marker", *response.ServiceListContainersSegmentResult.NextMarker)
+ queryValues.Set("marker", *response.ListContainersSegmentResponse.NextMarker)
req.Raw().URL.RawQuery = queryValues.Encode()
return req, nil
}
- return pager
+ return toServiceListContainersSegmentPager(*pager)
}
// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics
// and CORS (Cross-Origin Resource Sharing) rules.
-func (s ServiceClient) GetProperties(ctx context.Context) (ServiceGetPropertiesResponse, error) {
- resp, err := s.client.GetProperties(ctx, nil)
+func (s *ServiceClient) GetProperties(ctx context.Context, o *ServiceGetPropertiesOptions) (ServiceGetPropertiesResponse, error) {
+ getPropertiesOptions := o.format()
+ resp, err := s.client.GetProperties(ctx, getPropertiesOptions)
- return resp, handleError(err)
+ return toServiceGetPropertiesResponse(resp), handleError(err)
}
// SetProperties Sets the properties of a storage account's Blob service, including Azure Storage Analytics.
// If an element (e.g. analytics_logging) is left as None, the existing settings on the service for that functionality are preserved.
-func (s ServiceClient) SetProperties(ctx context.Context, properties StorageServiceProperties) (ServiceSetPropertiesResponse, error) {
- resp, err := s.client.SetProperties(ctx, properties, nil)
+func (s *ServiceClient) SetProperties(ctx context.Context, o *ServiceSetPropertiesOptions) (ServiceSetPropertiesResponse, error) {
+ properties, setPropertiesOptions := o.format()
+ resp, err := s.client.SetProperties(ctx, properties, setPropertiesOptions)
- return resp, handleError(err)
+ return toServiceSetPropertiesResponse(resp), handleError(err)
}
// GetStatistics Retrieves statistics related to replication for the Blob service.
@@ -208,29 +211,31 @@ func (s ServiceClient) SetProperties(ctx context.Context, properties StorageServ
// center that resides in the same region as the primary location. Read-only
// access is available from the secondary location, if read-access geo-redundant
// replication is enabled for your storage account.
-func (s ServiceClient) GetStatistics(ctx context.Context) (ServiceGetStatisticsResponse, error) {
- resp, err := s.client.GetStatistics(ctx, nil)
+func (s *ServiceClient) GetStatistics(ctx context.Context, o *ServiceGetStatisticsOptions) (ServiceGetStatisticsResponse, error) {
+ getStatisticsOptions := o.format()
+ resp, err := s.client.GetStatistics(ctx, getStatisticsOptions)
- return resp, handleError(err)
+ return toServiceGetStatisticsResponse(resp), handleError(err)
}
-func (s ServiceClient) CanGetAccountSASToken() bool {
+// CanGetAccountSASToken reports whether the ServiceClient holds a shared key (non-nil), i.e. whether an account SAS token can be generated
+func (s *ServiceClient) CanGetAccountSASToken() bool {
return s.sharedKey != nil
}
-// GetSASToken is a convenience method for generating a SAS token for the currently pointed at account.
+// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
// This validity can be checked with CanGetAccountSASToken().
-func (s ServiceClient) GetSASToken(resources AccountSASResourceTypes, permissions AccountSASPermissions, services AccountSASServices, start time.Time, expiry time.Time) (string, error) {
+func (s *ServiceClient) GetSASURL(resources AccountSASResourceTypes, permissions AccountSASPermissions, start time.Time, expiry time.Time) (string, error) {
if s.sharedKey == nil {
- return "", errors.New("credential is not a SharedKeyCredential. SAS can only be signed with a SharedKeyCredential")
+ return "", errors.New("SAS can only be signed with a SharedKeyCredential")
}
qps, err := AccountSASSignatureValues{
Version: SASVersion,
Protocol: SASProtocolHTTPS,
Permissions: permissions.String(),
- Services: services.String(),
+ Services: "b",
ResourceTypes: resources.String(),
StartTime: start.UTC(),
ExpiryTime: expiry.UTC(),
@@ -238,11 +243,13 @@ func (s ServiceClient) GetSASToken(resources AccountSASResourceTypes, permission
if err != nil {
return "", err
}
- endpoint := s.client.con.Endpoint()
+
+ endpoint := s.URL()
if !strings.HasSuffix(endpoint, "/") {
endpoint += "/"
}
endpoint += "?" + qps.Encode()
+
return endpoint, nil
}
@@ -251,8 +258,9 @@ func (s ServiceClient) GetSASToken(resources AccountSASResourceTypes, permission
// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags
// eg. "dog='germanshepherd' and penguin='emperorpenguin'"
// To specify a container, eg. "@container=’containerName’ and Name = ‘C’"
-func (s ServiceClient) FindBlobsByTags(ctx context.Context, options ServiceFilterBlobsByTagsOptions) (ServiceFilterBlobsResponse, error) {
+func (s *ServiceClient) FindBlobsByTags(ctx context.Context, o *ServiceFilterBlobsOptions) (ServiceFilterBlobsResponse, error) {
// TODO: Use pager here? Missing support from zz_generated_pagers.go
- serviceFilterBlobsOptions := options.pointer()
- return s.client.FilterBlobs(ctx, serviceFilterBlobsOptions)
+ serviceFilterBlobsOptions := o.pointer()
+ resp, err := s.client.FilterBlobs(ctx, serviceFilterBlobsOptions)
+ return toServiceFilterBlobsResponse(resp), err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go
index 0487768f215..60b1e5a76b6 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -16,6 +19,7 @@ import (
"sync/atomic"
"time"
+ azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
@@ -187,7 +191,7 @@ func (s *sharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) {
response, err := req.Next()
if err != nil && response != nil && response.StatusCode == http.StatusForbidden {
// Service failed to authenticate request, log it
- log.Write(log.EventResponse, "===== HTTP Forbidden status, String-to-NewSASQueryParameters:\n"+stringToSign+"\n===============================\n")
+ log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-NewSASQueryParameters:\n"+stringToSign+"\n===============================\n")
}
return response, err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go
index db6e1160ba1..08c9c873090 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -22,6 +25,7 @@ type InternalError struct {
cause error
}
+// Error checks if InternalError can be cast as StorageError
func (e *InternalError) Error() string {
if (errors.Is(e.cause, StorageError{})) {
return e.cause.Error()
@@ -30,12 +34,14 @@ func (e *InternalError) Error() string {
return fmt.Sprintf("===== INTERNAL ERROR =====\n%s", e.cause.Error())
}
+// Is casts err into InternalError
func (e *InternalError) Is(err error) bool {
_, ok := err.(*InternalError)
return ok
}
+// As casts target interface into InternalError
func (e *InternalError) As(target interface{}) bool {
nt, ok := target.(**InternalError)
@@ -60,6 +66,9 @@ type StorageError struct {
}
func handleError(err error) error {
+ if err == nil {
+ return nil
+ }
var respErr *azcore.ResponseError
if errors.As(err, &respErr) {
return &InternalError{responseErrorToStorageError(respErr)}
@@ -138,6 +147,7 @@ func (e StorageError) Error() string {
// return e.ErrorNode.Error(b.String())
}
+// Is checks if err can be cast as StorageError
func (e StorageError) Is(err error) bool {
_, ok := err.(StorageError)
_, ok2 := err.(*StorageError)
@@ -145,6 +155,7 @@ func (e StorageError) Is(err error) bool {
return ok || ok2
}
+// Response returns StorageError.response
func (e StorageError) Response() *http.Response {
return e.response
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go
index 5ebca43088f..341858f1ad8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -12,6 +15,7 @@ import (
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// Raw converts PageRange into primitive start, end integers of type int64
func (pr *PageRange) Raw() (start, end int64) {
if pr.Start != nil {
start = *pr.Start
@@ -31,8 +35,12 @@ type HttpRange struct {
Count int64
}
-func (r HttpRange) pointers() *string {
- if r.Offset == 0 && r.Count == 0 { // Do common case first for performance
+func NewHttpRange(offset, count int64) *HttpRange {
+ return &HttpRange{Offset: offset, Count: count}
+}
+
+func (r *HttpRange) format() *string {
+ if r == nil || (r.Offset == 0 && r.Count == 0) { // Do common case first for performance
return nil // No specified range
}
endOffset := "" // if count == CountToEnd (0)
@@ -58,7 +66,7 @@ func getSourceRange(offset, count *int64) *string {
newCount = *count
}
- return HttpRange{Offset: newOffset, Count: newCount}.pointers()
+ return (&HttpRange{Offset: newOffset, Count: newCount}).format()
}
func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go
index cb2e3f337ea..93a2b1a7007 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go
@@ -1,3 +1,6 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
@@ -17,7 +20,7 @@ type ContainerAccessConditions struct {
LeaseAccessConditions *LeaseAccessConditions
}
-func (ac *ContainerAccessConditions) pointers() (*ModifiedAccessConditions, *LeaseAccessConditions) {
+func (ac *ContainerAccessConditions) format() (*ModifiedAccessConditions, *LeaseAccessConditions) {
if ac == nil {
return nil, nil
}
@@ -31,7 +34,7 @@ type BlobAccessConditions struct {
ModifiedAccessConditions *ModifiedAccessConditions
}
-func (ac *BlobAccessConditions) pointers() (*LeaseAccessConditions, *ModifiedAccessConditions) {
+func (ac *BlobAccessConditions) format() (*LeaseAccessConditions, *ModifiedAccessConditions) {
if ac == nil {
return nil, nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go
new file mode 100644
index 00000000000..19c3fef66a9
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go
@@ -0,0 +1,184 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import "time"
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// AppendBlobCreateOptions provides set of configurations for Create Append Blob operation
+type AppendBlobCreateOptions struct {
+ // Specifies the date time when the blobs immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specified if a legal hold should be set on the blob.
+ LegalHold *bool
+
+ BlobAccessConditions *BlobAccessConditions
+
+ HTTPHeaders *BlobHTTPHeaders
+
+ CpkInfo *CpkInfo
+
+ CpkScopeInfo *CpkScopeInfo
+ // Optional. Used to set blob tags in various blob operations.
+ TagsMap map[string]string
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
+ // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
+ // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
+ // See Naming and Referencing Containers, Blobs, and Metadata for more information.
+ Metadata map[string]string
+}
+
+func (o *AppendBlobCreateOptions) format() (*appendBlobClientCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions,
+ *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) {
+
+ if o == nil {
+ return nil, nil, nil, nil, nil, nil
+ }
+
+ options := appendBlobClientCreateOptions{
+ BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap),
+ Metadata: o.Metadata,
+ ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
+ ImmutabilityPolicyMode: o.ImmutabilityPolicyMode,
+ LegalHold: o.LegalHold,
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
+}
+
+// AppendBlobCreateResponse contains the response from method AppendBlobClient.Create.
+type AppendBlobCreateResponse struct {
+ appendBlobClientCreateResponse
+}
+
+func toAppendBlobCreateResponse(resp appendBlobClientCreateResponse) AppendBlobCreateResponse {
+ return AppendBlobCreateResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// AppendBlobAppendBlockOptions provides set of configurations for AppendBlock operation
+type AppendBlobAppendBlockOptions struct {
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+
+ AppendPositionAccessConditions *AppendPositionAccessConditions
+
+ CpkInfo *CpkInfo
+
+ CpkScopeInfo *CpkScopeInfo
+
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *AppendBlobAppendBlockOptions) format() (*appendBlobClientAppendBlockOptions, *AppendPositionAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions, *LeaseAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil, nil, nil
+ }
+
+ options := &appendBlobClientAppendBlockOptions{
+ TransactionalContentCRC64: o.TransactionalContentCRC64,
+ TransactionalContentMD5: o.TransactionalContentMD5,
+ }
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions
+}
+
+// AppendBlobAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock.
+type AppendBlobAppendBlockResponse struct {
+ appendBlobClientAppendBlockResponse
+}
+
+func toAppendBlobAppendBlockResponse(resp appendBlobClientAppendBlockResponse) AppendBlobAppendBlockResponse {
+ return AppendBlobAppendBlockResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// AppendBlobAppendBlockFromURLOptions provides set of configurations for AppendBlockFromURL operation
+type AppendBlobAppendBlockFromURLOptions struct {
+ // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+ SourceContentMD5 []byte
+ // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+ SourceContentCRC64 []byte
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+
+ AppendPositionAccessConditions *AppendPositionAccessConditions
+
+ CpkInfo *CpkInfo
+
+ CpkScopeInfo *CpkScopeInfo
+
+ SourceModifiedAccessConditions *SourceModifiedAccessConditions
+
+ BlobAccessConditions *BlobAccessConditions
+ // Optional, you can specify whether a particular range of the blob is read
+ Offset *int64
+
+ Count *int64
+}
+
+func (o *AppendBlobAppendBlockFromURLOptions) format() (*appendBlobClientAppendBlockFromURLOptions, *CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *AppendPositionAccessConditions, *ModifiedAccessConditions, *SourceModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil, nil, nil, nil
+ }
+
+ options := &appendBlobClientAppendBlockFromURLOptions{
+ SourceRange: getSourceRange(o.Offset, o.Count),
+ SourceContentMD5: o.SourceContentMD5,
+ SourceContentcrc64: o.SourceContentCRC64,
+ TransactionalContentMD5: o.TransactionalContentMD5,
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions
+}
+
+// AppendBlobAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL.
+type AppendBlobAppendBlockFromURLResponse struct {
+ appendBlobClientAppendBlockFromURLResponse
+}
+
+func toAppendBlobAppendBlockFromURLResponse(resp appendBlobClientAppendBlockFromURLResponse) AppendBlobAppendBlockFromURLResponse {
+ return AppendBlobAppendBlockFromURLResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// AppendBlobSealOptions provides set of configurations for SealAppendBlob operation
+type AppendBlobSealOptions struct {
+ BlobAccessConditions *BlobAccessConditions
+ AppendPositionAccessConditions *AppendPositionAccessConditions
+}
+
+func (o *AppendBlobSealOptions) format() (leaseAccessConditions *LeaseAccessConditions,
+ modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) {
+ if o == nil {
+ return nil, nil, nil
+ }
+
+ return
+}
+
+// AppendBlobSealResponse contains the response from method AppendBlobClient.Seal.
+type AppendBlobSealResponse struct {
+ appendBlobClientSealResponse
+}
+
+func toAppendBlobSealResponse(resp appendBlobClientSealResponse) AppendBlobSealResponse {
+ return AppendBlobSealResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_request_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_request_options.go
deleted file mode 100644
index 9ea0047914c..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_request_options.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-type CreateAppendBlobOptions struct {
- BlobAccessConditions *BlobAccessConditions
-
- HTTPHeaders *BlobHTTPHeaders
-
- CpkInfo *CpkInfo
-
- CpkScopeInfo *CpkScopeInfo
- // Optional. Used to set blob tags in various blob operations.
- TagsMap map[string]string
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
- // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
- // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
- // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
- // See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
-
- Timeout *int32
-}
-
-func (o *CreateAppendBlobOptions) pointers() (*AppendBlobCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil, nil, nil
- }
-
- options := AppendBlobCreateOptions{
- BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap),
- Metadata: o.Metadata,
- RequestID: o.RequestID,
- Timeout: o.Timeout,
- }
-
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
-}
-
-type AppendBlockOptions struct {
- // Specify the transactional crc64 for the body, to be validated by the service.
- TransactionalContentCRC64 []byte
- // Specify the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 []byte
-
- AppendPositionAccessConditions *AppendPositionAccessConditions
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *AppendBlockOptions) pointers() (*AppendBlobAppendBlockOptions, *AppendPositionAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions, *LeaseAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil, nil, nil
- }
-
- options := &AppendBlobAppendBlockOptions{
- TransactionalContentCRC64: o.TransactionalContentCRC64,
- TransactionalContentMD5: o.TransactionalContentMD5,
- }
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions
-}
-
-type AppendBlockURLOptions struct {
- // Specify the md5 calculated for the range of bytes that must be read from the copy source.
- SourceContentMD5 []byte
- // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
- SourceContentCRC64 []byte
- // Specify the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 []byte
-
- AppendPositionAccessConditions *AppendPositionAccessConditions
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
- SourceModifiedAccessConditions *SourceModifiedAccessConditions
- BlobAccessConditions *BlobAccessConditions
- // Optional, you can specify whether a particular range of the blob is read
- Offset *int64
- Count *int64
-}
-
-func (o *AppendBlockURLOptions) pointers() (*AppendBlobAppendBlockFromURLOptions, *AppendPositionAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions, *LeaseAccessConditions, *SourceModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil, nil, nil, nil
- }
-
- options := &AppendBlobAppendBlockFromURLOptions{
- SourceRange: getSourceRange(o.Offset, o.Count),
- SourceContentMD5: o.SourceContentMD5,
- SourceContentcrc64: o.SourceContentCRC64,
- TransactionalContentMD5: o.TransactionalContentMD5,
- }
-
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions,
- leaseAccessConditions, o.SourceModifiedAccessConditions
-}
-
-type SealAppendBlobOptions struct {
- BlobAccessConditions *BlobAccessConditions
- AppendPositionAccessConditions *AppendPositionAccessConditions
-}
-
-func (o *SealAppendBlobOptions) pointers() (leaseAccessConditions *LeaseAccessConditions,
- modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) {
- if o == nil {
- return nil, nil, nil
- }
-
- return
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go
new file mode 100644
index 00000000000..f4425b18c82
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go
@@ -0,0 +1,478 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+ "context"
+ "io"
+ "net/http"
+ "time"
+)
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobDownloadOptions provides set of configurations for Download blob operation
+type BlobDownloadOptions struct {
+ // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
+ // range is less than or equal to 4 MB in size.
+ RangeGetContentMD5 *bool
+
+ // Optional, you can specify whether a particular range of the blob is read
+ Offset *int64
+ Count *int64
+
+ BlobAccessConditions *BlobAccessConditions
+ CpkInfo *CpkInfo
+ CpkScopeInfo *CpkScopeInfo
+}
+
+func (o *BlobDownloadOptions) format() (*blobClientDownloadOptions, *LeaseAccessConditions, *CpkInfo, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil
+ }
+
+ offset := int64(0)
+ count := int64(CountToEnd)
+
+ if o.Offset != nil {
+ offset = *o.Offset
+ }
+
+ if o.Count != nil {
+ count = *o.Count
+ }
+
+ basics := blobClientDownloadOptions{
+ RangeGetContentMD5: o.RangeGetContentMD5,
+ Range: (&HttpRange{Offset: offset, Count: count}).format(),
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return &basics, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions
+}
+
+// BlobDownloadResponse wraps AutoRest generated BlobDownloadResponse and helps to provide info for retry.
+type BlobDownloadResponse struct {
+ blobClientDownloadResponse
+ ctx context.Context
+ b *BlobClient
+ getInfo HTTPGetterInfo
+ ObjectReplicationRules []ObjectReplicationPolicy
+}
+
+// Body constructs new RetryReader stream for reading data. If a connection fails
+// while reading, it will make additional requests to reestablish a connection and
+// continue reading. Specifying a RetryReaderOption's with MaxRetryRequests set to 0
+// (the default), returns the original response body and no retries will be performed.
+// Pass in nil for options to accept the default options.
+func (r *BlobDownloadResponse) Body(options *RetryReaderOptions) io.ReadCloser {
+ if options == nil {
+ options = &RetryReaderOptions{}
+ }
+
+ if options.MaxRetryRequests == 0 { // No additional retries
+ return r.RawResponse.Body
+ }
+ return NewRetryReader(r.ctx, r.RawResponse, r.getInfo, *options,
+ func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
+ accessConditions := &BlobAccessConditions{
+ ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: &getInfo.ETag},
+ }
+ options := BlobDownloadOptions{
+ Offset: &getInfo.Offset,
+ Count: &getInfo.Count,
+ BlobAccessConditions: accessConditions,
+ CpkInfo: options.CpkInfo,
+ //CpkScopeInfo: o.CpkScopeInfo,
+ }
+ resp, err := r.b.Download(ctx, &options)
+ if err != nil {
+ return nil, err
+ }
+ return resp.RawResponse, err
+ },
+ )
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobDeleteOptions provides set of configurations for Delete blob operation
+type BlobDeleteOptions struct {
+ // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob
+ // and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself
+ DeleteSnapshots *DeleteSnapshotsOptionType
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *BlobDeleteOptions) format() (*blobClientDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil
+ }
+
+ basics := blobClientDeleteOptions{
+ DeleteSnapshots: o.DeleteSnapshots,
+ }
+
+ if o.BlobAccessConditions == nil {
+ return &basics, nil, nil
+ }
+
+ return &basics, o.BlobAccessConditions.LeaseAccessConditions, o.BlobAccessConditions.ModifiedAccessConditions
+}
+
+// BlobDeleteResponse contains the response from method BlobClient.Delete.
+type BlobDeleteResponse struct {
+ blobClientDeleteResponse
+}
+
+func toBlobDeleteResponse(resp blobClientDeleteResponse) BlobDeleteResponse {
+ return BlobDeleteResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobUndeleteOptions provides set of configurations for Blob Undelete operation
+type BlobUndeleteOptions struct {
+}
+
+func (o *BlobUndeleteOptions) format() *blobClientUndeleteOptions {
+ return nil
+}
+
+// BlobUndeleteResponse contains the response from method BlobClient.Undelete.
+type BlobUndeleteResponse struct {
+ blobClientUndeleteResponse
+}
+
+func toBlobUndeleteResponse(resp blobClientUndeleteResponse) BlobUndeleteResponse {
+ return BlobUndeleteResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobSetTierOptions provides set of configurations for SetTier on blob operation
+type BlobSetTierOptions struct {
+ // Optional: Indicates the priority with which to rehydrate an archived blob.
+ RehydratePriority *RehydratePriority
+
+ LeaseAccessConditions *LeaseAccessConditions
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *BlobSetTierOptions) format() (*blobClientSetTierOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil
+ }
+
+ basics := blobClientSetTierOptions{RehydratePriority: o.RehydratePriority}
+ return &basics, o.LeaseAccessConditions, o.ModifiedAccessConditions
+}
+
+// BlobSetTierResponse contains the response from method BlobClient.SetTier.
+type BlobSetTierResponse struct {
+ blobClientSetTierResponse
+}
+
+func toBlobSetTierResponse(resp blobClientSetTierResponse) BlobSetTierResponse {
+ return BlobSetTierResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobGetPropertiesOptions provides set of configurations for GetProperties blob operation
+type BlobGetPropertiesOptions struct {
+ BlobAccessConditions *BlobAccessConditions
+ CpkInfo *CpkInfo
+}
+
+func (o *BlobGetPropertiesOptions) format() (blobClientGetPropertiesOptions *blobClientGetPropertiesOptions,
+ leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil
+ }
+
+ leaseAccessConditions, modifiedAccessConditions = o.BlobAccessConditions.format()
+ return nil, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions
+}
+
+// ObjectReplicationRules struct
+type ObjectReplicationRules struct {
+ RuleId string
+ Status string
+}
+
+// ObjectReplicationPolicy are deserialized attributes
+type ObjectReplicationPolicy struct {
+ PolicyId *string
+ Rules *[]ObjectReplicationRules
+}
+
+// BlobGetPropertiesResponse reformat the GetPropertiesResponse object for easy consumption
+type BlobGetPropertiesResponse struct {
+ blobClientGetPropertiesResponse
+
+ // deserialized attributes
+ ObjectReplicationRules []ObjectReplicationPolicy
+}
+
+func toGetBlobPropertiesResponse(resp blobClientGetPropertiesResponse) BlobGetPropertiesResponse {
+ getResp := BlobGetPropertiesResponse{
+ blobClientGetPropertiesResponse: resp,
+ ObjectReplicationRules: deserializeORSPolicies(resp.ObjectReplicationRules),
+ }
+ return getResp
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobSetHTTPHeadersOptions provides set of configurations for SetHTTPHeaders on blob operation
+type BlobSetHTTPHeadersOptions struct {
+ LeaseAccessConditions *LeaseAccessConditions
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *BlobSetHTTPHeadersOptions) format() (*blobClientSetHTTPHeadersOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil
+ }
+
+ return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions
+}
+
+// BlobSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders.
+type BlobSetHTTPHeadersResponse struct {
+ blobClientSetHTTPHeadersResponse
+}
+
+func toBlobSetHTTPHeadersResponse(resp blobClientSetHTTPHeadersResponse) BlobSetHTTPHeadersResponse {
+ return BlobSetHTTPHeadersResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobSetMetadataOptions provides set of configurations for Set Metadata on blob operation
+type BlobSetMetadataOptions struct {
+ LeaseAccessConditions *LeaseAccessConditions
+ CpkInfo *CpkInfo
+ CpkScopeInfo *CpkScopeInfo
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *BlobSetMetadataOptions) format() (leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo,
+ cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil
+ }
+
+ return o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions
+}
+
+// BlobSetMetadataResponse contains the response from method BlobClient.SetMetadata.
+type BlobSetMetadataResponse struct {
+ blobClientSetMetadataResponse
+}
+
+func toBlobSetMetadataResponse(resp blobClientSetMetadataResponse) BlobSetMetadataResponse {
+ return BlobSetMetadataResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobCreateSnapshotOptions provides set of configurations for CreateSnapshot of blob operation
+type BlobCreateSnapshotOptions struct {
+ Metadata map[string]string
+ LeaseAccessConditions *LeaseAccessConditions
+ CpkInfo *CpkInfo
+ CpkScopeInfo *CpkScopeInfo
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *BlobCreateSnapshotOptions) format() (blobSetMetadataOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo,
+ cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil, nil
+ }
+
+ basics := blobClientCreateSnapshotOptions{
+ Metadata: o.Metadata,
+ }
+
+ return &basics, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions, o.LeaseAccessConditions
+}
+
+// BlobCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot
+type BlobCreateSnapshotResponse struct {
+ blobClientCreateSnapshotResponse
+}
+
+func toBlobCreateSnapshotResponse(resp blobClientCreateSnapshotResponse) BlobCreateSnapshotResponse {
+ return BlobCreateSnapshotResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobStartCopyOptions provides set of configurations for StartCopyFromURL blob operation
+type BlobStartCopyOptions struct {
+ // Specifies the date time when the blobs immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specified if a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Used to set blob tags in various blob operations.
+ TagsMap map[string]string
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
+ // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
+ // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
+ // See Naming and Referencing Containers, Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Optional: Indicates the priority with which to rehydrate an archived blob.
+ RehydratePriority *RehydratePriority
+ // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
+ SealBlob *bool
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+
+ SourceModifiedAccessConditions *SourceModifiedAccessConditions
+
+ ModifiedAccessConditions *ModifiedAccessConditions
+
+ LeaseAccessConditions *LeaseAccessConditions
+}
+
+func (o *BlobStartCopyOptions) format() (blobStartCopyFromUrlOptions *blobClientStartCopyFromURLOptions,
+ sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil
+ }
+
+ basics := blobClientStartCopyFromURLOptions{
+ BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap),
+ Metadata: o.Metadata,
+ RehydratePriority: o.RehydratePriority,
+ SealBlob: o.SealBlob,
+ Tier: o.Tier,
+ ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
+ ImmutabilityPolicyMode: o.ImmutabilityPolicyMode,
+ LegalHold: o.LegalHold,
+ }
+
+ return &basics, o.SourceModifiedAccessConditions, o.ModifiedAccessConditions, o.LeaseAccessConditions
+}
+
+// BlobStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL.
+type BlobStartCopyFromURLResponse struct {
+ blobClientStartCopyFromURLResponse
+}
+
+func toBlobStartCopyFromURLResponse(resp blobClientStartCopyFromURLResponse) BlobStartCopyFromURLResponse {
+ return BlobStartCopyFromURLResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobAbortCopyOptions provides set of configurations for AbortCopyFromURL operation
+type BlobAbortCopyOptions struct {
+ LeaseAccessConditions *LeaseAccessConditions
+}
+
+func (o *BlobAbortCopyOptions) format() (blobAbortCopyFromUrlOptions *blobClientAbortCopyFromURLOptions,
+ leaseAccessConditions *LeaseAccessConditions) {
+ if o == nil {
+ return nil, nil
+ }
+ return nil, o.LeaseAccessConditions
+}
+
+// BlobAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL
+type BlobAbortCopyFromURLResponse struct {
+ blobClientAbortCopyFromURLResponse
+}
+
+func toBlobAbortCopyFromURLResponse(resp blobClientAbortCopyFromURLResponse) BlobAbortCopyFromURLResponse {
+ return BlobAbortCopyFromURLResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobSetTagsOptions provides set of configurations for SetTags operation
+type BlobSetTagsOptions struct {
+ // The version id parameter is an opaque DateTime value that, when present,
+ // specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
+ VersionID *string
+ // Optional header, Specifies the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+ // Optional header, Specifies the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+
+ TagsMap map[string]string
+
+ ModifiedAccessConditions *ModifiedAccessConditions
+ LeaseAccessConditions *LeaseAccessConditions
+}
+
+func (o *BlobSetTagsOptions) format() (*blobClientSetTagsOptions, *ModifiedAccessConditions, *LeaseAccessConditions) {
+ if o == nil {
+ return nil, nil, nil
+ }
+
+ options := &blobClientSetTagsOptions{
+ Tags: serializeBlobTags(o.TagsMap),
+ TransactionalContentMD5: o.TransactionalContentMD5,
+ TransactionalContentCRC64: o.TransactionalContentCRC64,
+ VersionID: o.VersionID,
+ }
+
+ return options, o.ModifiedAccessConditions, o.LeaseAccessConditions
+}
+
+// BlobSetTagsResponse contains the response from method BlobClient.SetTags
+type BlobSetTagsResponse struct {
+ blobClientSetTagsResponse
+}
+
+func toBlobSetTagsResponse(resp blobClientSetTagsResponse) BlobSetTagsResponse {
+ return BlobSetTagsResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobGetTagsOptions provides set of configurations for GetTags operation
+type BlobGetTagsOptions struct {
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve.
+ Snapshot *string
+ // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+ // It's for service version 2019-10-10 and newer.
+ VersionID *string
+
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *BlobGetTagsOptions) format() (*blobClientGetTagsOptions, *ModifiedAccessConditions, *LeaseAccessConditions) {
+ if o == nil {
+ return nil, nil, nil
+ }
+
+ options := &blobClientGetTagsOptions{
+ Snapshot: o.Snapshot,
+ VersionID: o.VersionID,
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+
+ return options, modifiedAccessConditions, leaseAccessConditions
+}
+
+// BlobGetTagsResponse contains the response from method BlobClient.GetTags
+type BlobGetTagsResponse struct {
+ blobClientGetTagsResponse
+}
+
+func toBlobGetTagsResponse(resp blobClientGetTagsResponse) BlobGetTagsResponse {
+ return BlobGetTagsResponse{resp}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go
new file mode 100644
index 00000000000..4e574622cca
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go
@@ -0,0 +1,160 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
+)
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobAcquireLeaseOptions provides set of configurations for AcquireLeaseBlob operation
+type BlobAcquireLeaseOptions struct {
+ // Specifies the Duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease
+ // can be between 15 and 60 seconds. A lease Duration cannot be changed using renew or change.
+ Duration *int32
+
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *BlobAcquireLeaseOptions) format() (blobClientAcquireLeaseOptions, *ModifiedAccessConditions) {
+ if o == nil {
+ return blobClientAcquireLeaseOptions{}, nil
+ }
+ return blobClientAcquireLeaseOptions{
+ Duration: o.Duration,
+ }, o.ModifiedAccessConditions
+}
+
+// BlobAcquireLeaseResponse contains the response from method BlobLeaseClient.AcquireLease.
+type BlobAcquireLeaseResponse struct {
+ blobClientAcquireLeaseResponse
+}
+
+func toBlobAcquireLeaseResponse(resp blobClientAcquireLeaseResponse) BlobAcquireLeaseResponse {
+ return BlobAcquireLeaseResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobBreakLeaseOptions provides set of configurations for BreakLeaseBlob operation
+type BlobBreakLeaseOptions struct {
+ // For a break operation, proposed Duration the lease should continue before it is broken, in seconds, between 0 and 60. This
+ // break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease
+ // is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than
+ // the break period. If this header does not appear with a break operation, a fixed-Duration lease breaks after the remaining
+ // lease period elapses, and an infinite lease breaks immediately.
+ BreakPeriod *int32
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *BlobBreakLeaseOptions) format() (*blobClientBreakLeaseOptions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil
+ }
+
+ if o.BreakPeriod != nil {
+ period := leasePeriodPointer(*o.BreakPeriod)
+ return &blobClientBreakLeaseOptions{
+ BreakPeriod: period,
+ }, o.ModifiedAccessConditions
+ }
+
+ return nil, o.ModifiedAccessConditions
+}
+
+// BlobBreakLeaseResponse contains the response from method BlobLeaseClient.BreakLease.
+type BlobBreakLeaseResponse struct {
+ blobClientBreakLeaseResponse
+}
+
+func toBlobBreakLeaseResponse(resp blobClientBreakLeaseResponse) BlobBreakLeaseResponse {
+ return BlobBreakLeaseResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobChangeLeaseOptions provides set of configurations for ChangeLeaseBlob operation
+type BlobChangeLeaseOptions struct {
+ ProposedLeaseID *string
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *BlobChangeLeaseOptions) format() (*string, *blobClientChangeLeaseOptions, *ModifiedAccessConditions, error) {
+ generatedUuid, err := uuid.New()
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ leaseID := to.Ptr(generatedUuid.String())
+ if o == nil {
+ return leaseID, nil, nil, nil
+ }
+
+ if o.ProposedLeaseID == nil {
+ o.ProposedLeaseID = leaseID
+ }
+
+ return o.ProposedLeaseID, nil, o.ModifiedAccessConditions, nil
+}
+
+// BlobChangeLeaseResponse contains the response from method BlobLeaseClient.ChangeLease
+type BlobChangeLeaseResponse struct {
+ blobClientChangeLeaseResponse
+}
+
+func toBlobChangeLeaseResponse(resp blobClientChangeLeaseResponse) BlobChangeLeaseResponse {
+ return BlobChangeLeaseResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlobRenewLeaseOptions provides set of configurations for RenewLeaseBlob operation
+type BlobRenewLeaseOptions struct {
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *BlobRenewLeaseOptions) format() (*blobClientRenewLeaseOptions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil
+ }
+
+ return nil, o.ModifiedAccessConditions
+}
+
+// BlobRenewLeaseResponse contains the response from method BlobClient.RenewLease.
+type BlobRenewLeaseResponse struct {
+ blobClientRenewLeaseResponse
+}
+
+func toBlobRenewLeaseResponse(resp blobClientRenewLeaseResponse) BlobRenewLeaseResponse {
+ return BlobRenewLeaseResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ReleaseLeaseBlobOptions provides set of configurations for ReleaseLeaseBlob operation
+type ReleaseLeaseBlobOptions struct {
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *ReleaseLeaseBlobOptions) format() (*blobClientReleaseLeaseOptions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil
+ }
+
+ return nil, o.ModifiedAccessConditions
+}
+
+// BlobReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease.
+type BlobReleaseLeaseResponse struct {
+ blobClientReleaseLeaseResponse
+}
+
+func toBlobReleaseLeaseResponse(resp blobClientReleaseLeaseResponse) BlobReleaseLeaseResponse {
+ return BlobReleaseLeaseResponse{resp}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_request_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_request_options.go
deleted file mode 100644
index d7621afc3b5..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_request_options.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-import (
- "net/url"
- "strings"
-)
-
-type DeleteBlobOptions struct {
- // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob
- // and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself
- DeleteSnapshots *DeleteSnapshotsOptionType
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *DeleteBlobOptions) pointers() (*BlobDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil
- }
-
- basics := BlobDeleteOptions{
- DeleteSnapshots: o.DeleteSnapshots,
- }
-
- if o.BlobAccessConditions == nil {
- return &basics, nil, nil
- }
-
- return &basics, o.BlobAccessConditions.LeaseAccessConditions, o.BlobAccessConditions.ModifiedAccessConditions
-}
-
-type DownloadBlobOptions struct {
- // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
- // range is less than or equal to 4 MB in size.
- RangeGetContentMD5 *bool
-
- // Optional, you can specify whether a particular range of the blob is read
- Offset *int64
- Count *int64
-
- BlobAccessConditions *BlobAccessConditions
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
-}
-
-func (o *DownloadBlobOptions) pointers() (blobDownloadOptions *BlobDownloadOptions,
- leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil
- }
-
- offset := int64(0)
- count := int64(CountToEnd)
-
- if o.Offset != nil {
- offset = *o.Offset
- }
-
- if o.Count != nil {
- count = *o.Count
- }
-
- basics := BlobDownloadOptions{
- RangeGetContentMD5: o.RangeGetContentMD5,
- Range: HttpRange{
- Offset: offset,
- Count: count,
- }.pointers(),
- }
- leaseAccessConditions, modifiedAccessConditions = o.BlobAccessConditions.pointers()
- return &basics, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions
-}
-
-type SetTierOptions struct {
- // Optional: Indicates the priority with which to rehydrate an archived blob.
- RehydratePriority *RehydratePriority
-
- LeaseAccessConditions *LeaseAccessConditions
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *SetTierOptions) pointers() (blobSetTierOptions *BlobSetTierOptions,
- leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil
- }
-
- basics := BlobSetTierOptions{RehydratePriority: o.RehydratePriority}
- return &basics, o.LeaseAccessConditions, o.ModifiedAccessConditions
-}
-
-type GetBlobPropertiesOptions struct {
- BlobAccessConditions *BlobAccessConditions
- CpkInfo *CpkInfo
-}
-
-func (o *GetBlobPropertiesOptions) pointers() (blobGetPropertiesOptions *BlobGetPropertiesOptions,
- leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil
- }
-
- leaseAccessConditions, modifiedAccessConditions = o.BlobAccessConditions.pointers()
- return nil, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions
-}
-
-type SetBlobHTTPHeadersOptions struct {
- LeaseAccessConditions *LeaseAccessConditions
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *SetBlobHTTPHeadersOptions) pointers() (blobSetHttpHeadersOptions *BlobSetHTTPHeadersOptions,
- leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil
- }
-
- return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions
-}
-
-type SetBlobMetadataOptions struct {
- LeaseAccessConditions *LeaseAccessConditions
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *SetBlobMetadataOptions) pointers() (leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo,
- cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil
- }
-
- return o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions
-}
-
-type CreateBlobSnapshotOptions struct {
- Metadata map[string]string
- LeaseAccessConditions *LeaseAccessConditions
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *CreateBlobSnapshotOptions) pointers() (blobSetMetadataOptions *BlobCreateSnapshotOptions, cpkInfo *CpkInfo,
- cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil, nil
- }
-
- basics := BlobCreateSnapshotOptions{
- Metadata: o.Metadata,
- }
-
- return &basics, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions, o.LeaseAccessConditions
-}
-
-type StartCopyBlobOptions struct {
- // Optional. Used to set blob tags in various blob operations.
- TagsMap map[string]string
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
- // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
- // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
- // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
- // See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Optional: Indicates the priority with which to rehydrate an archived blob.
- RehydratePriority *RehydratePriority
- // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
- SealBlob *bool
- // Optional. Indicates the tier to be set on the blob.
- Tier *AccessTier
-
- SourceModifiedAccessConditions *SourceModifiedAccessConditions
- ModifiedAccessConditions *ModifiedAccessConditions
- LeaseAccessConditions *LeaseAccessConditions
-}
-
-func (o *StartCopyBlobOptions) pointers() (blobStartCopyFromUrlOptions *BlobStartCopyFromURLOptions,
- sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil
- }
-
- basics := BlobStartCopyFromURLOptions{
- BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap),
- Metadata: o.Metadata,
- RehydratePriority: o.RehydratePriority,
- SealBlob: o.SealBlob,
- Tier: o.Tier,
- }
-
- return &basics, o.SourceModifiedAccessConditions, o.ModifiedAccessConditions, o.LeaseAccessConditions
-}
-
-type AbortCopyBlobOptions struct {
- LeaseAccessConditions *LeaseAccessConditions
-}
-
-func (o *AbortCopyBlobOptions) pointers() (blobAbortCopyFromUrlOptions *BlobAbortCopyFromURLOptions,
- leaseAccessConditions *LeaseAccessConditions) {
- if o == nil {
- return nil, nil
- }
- return nil, o.LeaseAccessConditions
-}
-
-func serializeBlobTagsToStrPtr(tagsMap map[string]string) *string {
- if tagsMap == nil {
- return nil
- }
- tags := make([]string, 0)
- for key, val := range tagsMap {
- tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val))
- }
- //tags = tags[:len(tags)-1]
- blobTagsString := strings.Join(tags, "&")
- return &blobTagsString
-}
-
-func serializeBlobTags(tagsMap map[string]string) *BlobTags {
- if tagsMap == nil {
- return nil
- }
- blobTagSet := make([]*BlobTag, 0)
- for key, val := range tagsMap {
- newKey, newVal := key, val
- blobTagSet = append(blobTagSet, &BlobTag{Key: &newKey, Value: &newVal})
- }
- return &BlobTags{BlobTagSet: blobTagSet}
-}
-
-type SetTagsBlobOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds.
- Timeout *int32
- // The version id parameter is an opaque DateTime value that, when present,
- // specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
- VersionID *string
- // Optional header, Specifies the transactional crc64 for the body, to be validated by the service.
- TransactionalContentCRC64 []byte
- // Optional header, Specifies the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 []byte
-
- TagsMap map[string]string
-
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *SetTagsBlobOptions) pointers() (*BlobSetTagsOptions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil
- }
-
- options := &BlobSetTagsOptions{
- RequestID: o.RequestID,
- Tags: serializeBlobTags(o.TagsMap),
- Timeout: o.Timeout,
- TransactionalContentMD5: o.TransactionalContentMD5,
- TransactionalContentCRC64: o.TransactionalContentCRC64,
- VersionID: o.VersionID,
- }
-
- return options, o.ModifiedAccessConditions
-}
-
-type GetTagsBlobOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve.
- Snapshot *string
- // The timeout parameter is expressed in seconds.
- Timeout *int32
- // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
- // It's for service version 2019-10-10 and newer.
- VersionID *string
-
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *GetTagsBlobOptions) pointers() (*BlobGetTagsOptions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil
- }
-
- options := &BlobGetTagsOptions{
- RequestID: o.RequestID,
- Snapshot: o.Snapshot,
- Timeout: o.Timeout,
- VersionID: o.VersionID,
- }
-
- return options, o.ModifiedAccessConditions
-}
-
-type ObjectReplicationRules struct {
- RuleId string
- Status string
-}
-
-type ObjectReplicationPolicy struct {
- PolicyId *string
- Rules *[]ObjectReplicationRules
-}
-
-type GetBlobPropertiesResponse struct {
- BlobGetPropertiesResponse
-
- // deserialized attributes
- ObjectReplicationRules []ObjectReplicationPolicy
-}
-
-func (bgpr *BlobGetPropertiesResponse) deserializeAttributes() GetBlobPropertiesResponse {
- getResp := GetBlobPropertiesResponse{}
- if bgpr == nil {
- return getResp
- }
- getResp.BlobGetPropertiesResponse = *bgpr
- getResp.ObjectReplicationRules = deserializeORSPolicies(bgpr.ObjectReplicationRules)
- return getResp
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go
new file mode 100644
index 00000000000..06d4368557a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go
@@ -0,0 +1,272 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import "time"
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlockBlobUploadOptions provides set of configurations for UploadBlockBlob operation
+type BlockBlobUploadOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ TagsMap map[string]string
+
+ // Optional. Specifies a user-defined name-value pair associated with the blob.
+ Metadata map[string]string
+
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+
+ HTTPHeaders *BlobHTTPHeaders
+ CpkInfo *CpkInfo
+ CpkScopeInfo *CpkScopeInfo
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *BlockBlobUploadOptions) format() (*blockBlobClientUploadOptions, *BlobHTTPHeaders, *LeaseAccessConditions,
+ *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil, nil, nil
+ }
+
+ basics := blockBlobClientUploadOptions{
+ BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap),
+ Metadata: o.Metadata,
+ Tier: o.Tier,
+ TransactionalContentMD5: o.TransactionalContentMD5,
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return &basics, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
+}
+
+// BlockBlobUploadResponse contains the response from method BlockBlobClient.Upload.
+type BlockBlobUploadResponse struct {
+ blockBlobClientUploadResponse
+}
+
+func toBlockBlobUploadResponse(resp blockBlobClientUploadResponse) BlockBlobUploadResponse {
+ return BlockBlobUploadResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlockBlobStageBlockOptions provides set of configurations for StageBlock operation
+type BlockBlobStageBlockOptions struct {
+ CpkInfo *CpkInfo
+
+ CpkScopeInfo *CpkScopeInfo
+
+ LeaseAccessConditions *LeaseAccessConditions
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+func (o *BlockBlobStageBlockOptions) format() (*blockBlobClientStageBlockOptions, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo) {
+ if o == nil {
+ return nil, nil, nil, nil
+ }
+
+ return &blockBlobClientStageBlockOptions{
+ TransactionalContentCRC64: o.TransactionalContentCRC64,
+ TransactionalContentMD5: o.TransactionalContentMD5,
+ }, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo
+}
+
+// BlockBlobStageBlockResponse contains the response from method BlockBlobClient.StageBlock.
+type BlockBlobStageBlockResponse struct {
+ blockBlobClientStageBlockResponse
+}
+
+func toBlockBlobStageBlockResponse(resp blockBlobClientStageBlockResponse) BlockBlobStageBlockResponse {
+ return BlockBlobStageBlockResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlockBlobStageBlockFromURLOptions provides set of configurations for StageBlockFromURL operation
+type BlockBlobStageBlockFromURLOptions struct {
+ // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string
+
+ LeaseAccessConditions *LeaseAccessConditions
+
+ SourceModifiedAccessConditions *SourceModifiedAccessConditions
+ // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+ SourceContentMD5 []byte
+ // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+ SourceContentCRC64 []byte
+
+ Offset *int64
+
+ Count *int64
+
+ CpkInfo *CpkInfo
+
+ CpkScopeInfo *CpkScopeInfo
+}
+
+func (o *BlockBlobStageBlockFromURLOptions) format() (*blockBlobClientStageBlockFromURLOptions, *CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *SourceModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil, nil
+ }
+
+ options := &blockBlobClientStageBlockFromURLOptions{
+ CopySourceAuthorization: o.CopySourceAuthorization,
+ SourceContentMD5: o.SourceContentMD5,
+ SourceContentcrc64: o.SourceContentCRC64,
+ SourceRange: getSourceRange(o.Offset, o.Count),
+ }
+
+ return options, o.CpkInfo, o.CpkScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions
+}
+
+// BlockBlobStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL.
+type BlockBlobStageBlockFromURLResponse struct {
+ blockBlobClientStageBlockFromURLResponse
+}
+
+func toBlockBlobStageBlockFromURLResponse(resp blockBlobClientStageBlockFromURLResponse) BlockBlobStageBlockFromURLResponse {
+ return BlockBlobStageBlockFromURLResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlockBlobCommitBlockListOptions provides set of configurations for CommitBlockList operation
+type BlockBlobCommitBlockListOptions struct {
+ BlobTagsMap map[string]string
+ Metadata map[string]string
+ RequestID *string
+ Tier *AccessTier
+ Timeout *int32
+ TransactionalContentCRC64 []byte
+ TransactionalContentMD5 []byte
+ BlobHTTPHeaders *BlobHTTPHeaders
+ CpkInfo *CpkInfo
+ CpkScopeInfo *CpkScopeInfo
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *BlockBlobCommitBlockListOptions) format() (*blockBlobClientCommitBlockListOptions, *BlobHTTPHeaders, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil, nil, nil
+ }
+
+ options := &blockBlobClientCommitBlockListOptions{
+ BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap),
+ Metadata: o.Metadata,
+ RequestID: o.RequestID,
+ Tier: o.Tier,
+ Timeout: o.Timeout,
+ TransactionalContentCRC64: o.TransactionalContentCRC64,
+ TransactionalContentMD5: o.TransactionalContentMD5,
+ }
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return options, o.BlobHTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
+}
+
+// BlockBlobCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList.
+type BlockBlobCommitBlockListResponse struct {
+ blockBlobClientCommitBlockListResponse
+}
+
+func toBlockBlobCommitBlockListResponse(resp blockBlobClientCommitBlockListResponse) BlockBlobCommitBlockListResponse {
+ return BlockBlobCommitBlockListResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlockBlobGetBlockListOptions provides set of configurations for GetBlockList operation
+type BlockBlobGetBlockListOptions struct {
+ Snapshot *string
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *BlockBlobGetBlockListOptions) format() (*blockBlobClientGetBlockListOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return &blockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}, leaseAccessConditions, modifiedAccessConditions
+}
+
+// BlockBlobGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList.
+type BlockBlobGetBlockListResponse struct {
+ blockBlobClientGetBlockListResponse
+}
+
+func toBlockBlobGetBlockListResponse(resp blockBlobClientGetBlockListResponse) BlockBlobGetBlockListResponse {
+ return BlockBlobGetBlockListResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BlockBlobCopyFromURLOptions provides set of configurations for CopyBlockBlobFromURL operation
+type BlockBlobCopyFromURLOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsMap map[string]string
+ // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string
+ // Specifies the date time when the blobs immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specified if a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+ SourceContentMD5 []byte
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+
+ SourceModifiedAccessConditions *SourceModifiedAccessConditions
+
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *BlockBlobCopyFromURLOptions) format() (*blobClientCopyFromURLOptions, *SourceModifiedAccessConditions, *ModifiedAccessConditions, *LeaseAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil
+ }
+
+ options := &blobClientCopyFromURLOptions{
+ BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap),
+ CopySourceAuthorization: o.CopySourceAuthorization,
+ ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
+ ImmutabilityPolicyMode: o.ImmutabilityPolicyMode,
+ LegalHold: o.LegalHold,
+ Metadata: o.Metadata,
+ SourceContentMD5: o.SourceContentMD5,
+ Tier: o.Tier,
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions
+}
+
+// BlockBlobCopyFromURLResponse contains the response from method BlockBlobClient.CopyFromURL.
+type BlockBlobCopyFromURLResponse struct {
+ blobClientCopyFromURLResponse
+}
+
+func toBlockBlobCopyFromURLResponse(resp blobClientCopyFromURLResponse) BlockBlobCopyFromURLResponse {
+ return BlockBlobCopyFromURLResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_request_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_request_options.go
deleted file mode 100644
index f074c66bc78..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_request_options.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-type UploadBlockBlobOptions struct {
- // Optional. Used to set blob tags in various blob operations.
- TagsMap map[string]string
-
- // Optional. Specifies a user-defined name-value pair associated with the blob.
- Metadata map[string]string
-
- // Optional. Indicates the tier to be set on the blob.
- Tier *AccessTier
-
- // Specify the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 []byte
-
- HTTPHeaders *BlobHTTPHeaders
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *UploadBlockBlobOptions) pointers() (*BlockBlobUploadOptions, *BlobHTTPHeaders, *LeaseAccessConditions,
- *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil, nil, nil
- }
-
- basics := BlockBlobUploadOptions{
- BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap),
- Metadata: o.Metadata,
- Tier: o.Tier,
- TransactionalContentMD5: o.TransactionalContentMD5,
- }
-
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return &basics, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
-}
-
-type StageBlockOptions struct {
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
- LeaseAccessConditions *LeaseAccessConditions
- BlockBlobStageBlockOptions *BlockBlobStageBlockOptions
-}
-
-func (o *StageBlockOptions) pointers() (*LeaseAccessConditions, *BlockBlobStageBlockOptions, *CpkInfo, *CpkScopeInfo) {
- if o == nil {
- return nil, nil, nil, nil
- }
-
- return o.LeaseAccessConditions, o.BlockBlobStageBlockOptions, o.CpkInfo, o.CpkScopeInfo
-}
-
-type StageBlockFromURLOptions struct {
- LeaseAccessConditions *LeaseAccessConditions
- SourceModifiedAccessConditions *SourceModifiedAccessConditions
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // Specify the md5 calculated for the range of bytes that must be read from the copy source.
- SourceContentMD5 []byte
- // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
- SourceContentcrc64 []byte
-
- Offset *int64
-
- Count *int64
- // The timeout parameter is expressed in seconds.
- Timeout *int32
-
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
-}
-
-func (o *StageBlockFromURLOptions) pointers() (*LeaseAccessConditions, *SourceModifiedAccessConditions, *BlockBlobStageBlockFromURLOptions, *CpkInfo, *CpkScopeInfo) {
- if o == nil {
- return nil, nil, nil, nil, nil
- }
-
- options := &BlockBlobStageBlockFromURLOptions{
- RequestID: o.RequestID,
- SourceContentMD5: o.SourceContentMD5,
- SourceContentcrc64: o.SourceContentcrc64,
- SourceRange: getSourceRange(o.Offset, o.Count),
- Timeout: o.Timeout,
- }
-
- return o.LeaseAccessConditions, o.SourceModifiedAccessConditions, options, o.CpkInfo, o.CpkScopeInfo
-}
-
-type CommitBlockListOptions struct {
- BlobTagsMap map[string]string
- Metadata map[string]string
- RequestID *string
- Tier *AccessTier
- Timeout *int32
- TransactionalContentCRC64 []byte
- TransactionalContentMD5 []byte
- BlobHTTPHeaders *BlobHTTPHeaders
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *CommitBlockListOptions) pointers() (*BlockBlobCommitBlockListOptions, *BlobHTTPHeaders, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions, *LeaseAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil, nil, nil
- }
-
- options := &BlockBlobCommitBlockListOptions{
- BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap),
- Metadata: o.Metadata,
- RequestID: o.RequestID,
- Tier: o.Tier,
- Timeout: o.Timeout,
- TransactionalContentCRC64: o.TransactionalContentCRC64,
- TransactionalContentMD5: o.TransactionalContentMD5,
- }
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return options, o.BlobHTTPHeaders, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions
-}
-
-type GetBlockListOptions struct {
- BlockBlobGetBlockListOptions *BlockBlobGetBlockListOptions
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *GetBlockListOptions) pointers() (*BlockBlobGetBlockListOptions, *ModifiedAccessConditions, *LeaseAccessConditions) {
- if o == nil {
- return nil, nil, nil
- }
-
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return o.BlockBlobGetBlockListOptions, modifiedAccessConditions, leaseAccessConditions
-}
-
-type CopyBlockBlobFromURLOptions struct {
- BlobTagsMap map[string]string
- Metadata map[string]string
- RequestID *string
- SourceContentMD5 []byte
- Tier *AccessTier
- Timeout *int32
- SourceModifiedAccessConditions *SourceModifiedAccessConditions
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *CopyBlockBlobFromURLOptions) pointers() (*BlobCopyFromURLOptions, *SourceModifiedAccessConditions, *ModifiedAccessConditions, *LeaseAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil
- }
-
- options := &BlobCopyFromURLOptions{
- BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap),
- Metadata: o.Metadata,
- RequestID: o.RequestID,
- SourceContentMD5: o.SourceContentMD5,
- Tier: o.Tier,
- Timeout: o.Timeout,
- }
-
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_options.go
deleted file mode 100644
index 41d1870a2d3..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_options.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-)
-
-type ClientOptions struct {
- // Transporter sets the transport for making HTTP requests.
- Transporter policy.Transporter
- // Retry configures the built-in retry policy behavior.
- Retry policy.RetryOptions
- // Telemetry configures the built-in telemetry policy behavior.
- Telemetry policy.TelemetryOptions
- // PerCallOptions are options to run on every request
- PerCallOptions []policy.Policy
-}
-
-func (o *ClientOptions) getConnectionOptions() *policy.ClientOptions {
- if o == nil {
- return nil
- }
-
- return &policy.ClientOptions{
- Transport: o.Transporter,
- Retry: o.Retry,
- Telemetry: o.Telemetry,
- PerCallPolicies: o.PerCallOptions,
- }
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go
new file mode 100644
index 00000000000..657a767dd54
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go
@@ -0,0 +1,55 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+)
+
+// ClientOptions adds additional client options while constructing connection
+type ClientOptions struct {
+ // Logging configures the built-in logging policy.
+ Logging policy.LogOptions
+
+ // Retry configures the built-in retry policy.
+ Retry policy.RetryOptions
+
+ // Telemetry configures the built-in telemetry policy.
+ Telemetry policy.TelemetryOptions
+
+ // Transport sets the transport for HTTP requests.
+ Transport policy.Transporter
+
+ // PerCallPolicies contains custom policies to inject into the pipeline.
+ // Each policy is executed once per request.
+ PerCallPolicies []policy.Policy
+
+ // PerRetryPolicies contains custom policies to inject into the pipeline.
+ // Each policy is executed once per request, and for each retry of that request.
+ PerRetryPolicies []policy.Policy
+}
+
+func (c *ClientOptions) toPolicyOptions() *azcore.ClientOptions {
+ return &azcore.ClientOptions{
+ Logging: c.Logging,
+ Retry: c.Retry,
+ Telemetry: c.Telemetry,
+ Transport: c.Transport,
+ PerCallPolicies: c.PerCallPolicies,
+ PerRetryPolicies: c.PerRetryPolicies,
+ }
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+func getConnectionOptions(options *ClientOptions) *policy.ClientOptions {
+ if options == nil {
+ options = &ClientOptions{}
+ }
+ return options.toPolicyOptions()
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go
new file mode 100644
index 00000000000..a33103e4b77
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go
@@ -0,0 +1,271 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerCreateOptions provides set of configurations for CreateContainer operation
+type ContainerCreateOptions struct {
+ // Specifies whether data in the container may be accessed publicly and the level of access
+ Access *PublicAccessType
+
+ // Optional. Specifies a user-defined name-value pair associated with the blob.
+ Metadata map[string]string
+
+ // Optional. Specifies the encryption scope settings to set on the container.
+ CpkScope *ContainerCpkScopeInfo
+}
+
+func (o *ContainerCreateOptions) format() (*containerClientCreateOptions, *ContainerCpkScopeInfo) {
+ if o == nil {
+ return nil, nil
+ }
+
+ basicOptions := containerClientCreateOptions{
+ Access: o.Access,
+ Metadata: o.Metadata,
+ }
+
+ return &basicOptions, o.CpkScope
+}
+
+// ContainerCreateResponse is wrapper around containerClientCreateResponse
+type ContainerCreateResponse struct {
+ containerClientCreateResponse
+}
+
+func toContainerCreateResponse(resp containerClientCreateResponse) ContainerCreateResponse {
+ return ContainerCreateResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerDeleteOptions provides set of configurations for DeleteContainer operation
+type ContainerDeleteOptions struct {
+ LeaseAccessConditions *LeaseAccessConditions
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *ContainerDeleteOptions) format() (*containerClientDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil
+ }
+
+ return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions
+}
+
+// ContainerDeleteResponse contains the response from method ContainerClient.Delete.
+type ContainerDeleteResponse struct {
+ containerClientDeleteResponse
+}
+
+func toContainerDeleteResponse(resp containerClientDeleteResponse) ContainerDeleteResponse {
+ return ContainerDeleteResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerGetPropertiesOptions provides set of configurations for GetPropertiesContainer operation
+type ContainerGetPropertiesOptions struct {
+ LeaseAccessConditions *LeaseAccessConditions
+}
+
+func (o *ContainerGetPropertiesOptions) format() (*containerClientGetPropertiesOptions, *LeaseAccessConditions) {
+ if o == nil {
+ return nil, nil
+ }
+
+ return nil, o.LeaseAccessConditions
+}
+
+// ContainerGetPropertiesResponse contains the response from method ContainerClient.GetProperties
+type ContainerGetPropertiesResponse struct {
+ containerClientGetPropertiesResponse
+}
+
+func toContainerGetPropertiesResponse(resp containerClientGetPropertiesResponse) ContainerGetPropertiesResponse {
+ return ContainerGetPropertiesResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerSetMetadataOptions provides set of configurations for SetMetadataContainer operation
+type ContainerSetMetadataOptions struct {
+ Metadata map[string]string
+ LeaseAccessConditions *LeaseAccessConditions
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *ContainerSetMetadataOptions) format() (*containerClientSetMetadataOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil
+ }
+
+ return &containerClientSetMetadataOptions{Metadata: o.Metadata}, o.LeaseAccessConditions, o.ModifiedAccessConditions
+}
+
+// ContainerSetMetadataResponse contains the response from method ContainerClient.SetMetadata
+type ContainerSetMetadataResponse struct {
+ containerClientSetMetadataResponse
+}
+
+func toContainerSetMetadataResponse(resp containerClientSetMetadataResponse) ContainerSetMetadataResponse {
+ return ContainerSetMetadataResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerGetAccessPolicyOptions provides set of configurations for GetAccessPolicy operation
+type ContainerGetAccessPolicyOptions struct {
+ LeaseAccessConditions *LeaseAccessConditions
+}
+
+func (o *ContainerGetAccessPolicyOptions) format() (*containerClientGetAccessPolicyOptions, *LeaseAccessConditions) {
+ if o == nil {
+ return nil, nil
+ }
+
+ return nil, o.LeaseAccessConditions
+}
+
+// ContainerGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy.
+type ContainerGetAccessPolicyResponse struct {
+ containerClientGetAccessPolicyResponse
+}
+
+func toContainerGetAccessPolicyResponse(resp containerClientGetAccessPolicyResponse) ContainerGetAccessPolicyResponse {
+ return ContainerGetAccessPolicyResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerSetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation
+type ContainerSetAccessPolicyOptions struct {
+ AccessConditions *ContainerAccessConditions
+ // Specifies whether data in the container may be accessed publicly and the level of access
+ Access *PublicAccessType
+ // the acls for the container
+ ContainerACL []*SignedIdentifier
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+func (o *ContainerSetAccessPolicyOptions) format() (*containerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil
+ }
+ mac, lac := o.AccessConditions.format()
+ return &containerClientSetAccessPolicyOptions{
+ Access: o.Access,
+ ContainerACL: o.ContainerACL,
+ RequestID: o.RequestID,
+ }, lac, mac
+}
+
+// ContainerSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy
+type ContainerSetAccessPolicyResponse struct {
+ containerClientSetAccessPolicyResponse
+}
+
+func toContainerSetAccessPolicyResponse(resp containerClientSetAccessPolicyResponse) ContainerSetAccessPolicyResponse {
+ return ContainerSetAccessPolicyResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerListBlobsFlatOptions provides set of configurations for ContainerClient.ListBlobsFlat operation
+type ContainerListBlobsFlatOptions struct {
+ // Include this parameter to specify one or more datasets to include in the response.
+ Include []ListBlobsIncludeItem
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ MaxResults *int32
+ // Filters the results to return only containers whose name begins with the specified prefix.
+ Prefix *string
+}
+
+func (o *ContainerListBlobsFlatOptions) format() *containerClientListBlobFlatSegmentOptions {
+ if o == nil {
+ return nil
+ }
+
+ return &containerClientListBlobFlatSegmentOptions{
+ Include: o.Include,
+ Marker: o.Marker,
+ Maxresults: o.MaxResults,
+ Prefix: o.Prefix,
+ }
+}
+
+// ContainerListBlobFlatPager provides operations for iterating over paged responses
+type ContainerListBlobFlatPager struct {
+ *containerClientListBlobFlatSegmentPager
+}
+
+func toContainerListBlobFlatSegmentPager(resp *containerClientListBlobFlatSegmentPager) *ContainerListBlobFlatPager {
+ return &ContainerListBlobFlatPager{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerListBlobsHierarchyOptions provides set of configurations for ContainerClient.ListBlobsHierarchy operation
+type ContainerListBlobsHierarchyOptions struct {
+ // Include this parameter to specify one or more datasets to include in the response.
+ Include []ListBlobsIncludeItem
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ MaxResults *int32
+ // Filters the results to return only containers whose name begins with the specified prefix.
+ Prefix *string
+}
+
+func (o *ContainerListBlobsHierarchyOptions) format() *containerClientListBlobHierarchySegmentOptions {
+ if o == nil {
+ return nil
+ }
+
+ return &containerClientListBlobHierarchySegmentOptions{
+ Include: o.Include,
+ Marker: o.Marker,
+ Maxresults: o.MaxResults,
+ Prefix: o.Prefix,
+ }
+}
+
+// ContainerListBlobHierarchyPager provides operations for iterating over paged responses.
+type ContainerListBlobHierarchyPager struct {
+ containerClientListBlobHierarchySegmentPager
+}
+
+func toContainerListBlobHierarchySegmentPager(resp *containerClientListBlobHierarchySegmentPager) *ContainerListBlobHierarchyPager {
+ if resp == nil {
+ return nil
+ }
+ return &ContainerListBlobHierarchyPager{*resp}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go
new file mode 100644
index 00000000000..87572e9178f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go
@@ -0,0 +1,166 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+ "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
+)
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// LeaseBreakNaturally tells ContainerClient's or BlobClient's BreakLease method to break the lease using service semantics.
+const LeaseBreakNaturally = -1
+
+func leasePeriodPointer(period int32) *int32 {
+ if period != LeaseBreakNaturally {
+ return &period
+ } else {
+ return nil
+ }
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerAcquireLeaseOptions provides set of configurations for AcquireLeaseContainer operation
+type ContainerAcquireLeaseOptions struct {
+ Duration *int32
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *ContainerAcquireLeaseOptions) format() (containerClientAcquireLeaseOptions, *ModifiedAccessConditions) {
+ if o == nil {
+ return containerClientAcquireLeaseOptions{}, nil
+ }
+ containerAcquireLeaseOptions := containerClientAcquireLeaseOptions{
+ Duration: o.Duration,
+ }
+
+ return containerAcquireLeaseOptions, o.ModifiedAccessConditions
+}
+
+// ContainerAcquireLeaseResponse contains the response from method ContainerLeaseClient.AcquireLease.
+type ContainerAcquireLeaseResponse struct {
+ containerClientAcquireLeaseResponse
+}
+
+func toContainerAcquireLeaseResponse(resp containerClientAcquireLeaseResponse) ContainerAcquireLeaseResponse {
+ return ContainerAcquireLeaseResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerBreakLeaseOptions provides set of configurations for BreakLeaseContainer operation
+type ContainerBreakLeaseOptions struct {
+ BreakPeriod *int32
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *ContainerBreakLeaseOptions) format() (*containerClientBreakLeaseOptions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil
+ }
+
+ containerBreakLeaseOptions := &containerClientBreakLeaseOptions{
+ BreakPeriod: o.BreakPeriod,
+ }
+
+ return containerBreakLeaseOptions, o.ModifiedAccessConditions
+}
+
+// ContainerBreakLeaseResponse contains the response from method ContainerLeaseClient.BreakLease.
+type ContainerBreakLeaseResponse struct {
+ containerClientBreakLeaseResponse
+}
+
+func toContainerBreakLeaseResponse(resp containerClientBreakLeaseResponse) ContainerBreakLeaseResponse {
+ return ContainerBreakLeaseResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerChangeLeaseOptions provides set of configurations for ChangeLeaseContainer operation
+type ContainerChangeLeaseOptions struct {
+ ProposedLeaseID *string
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *ContainerChangeLeaseOptions) format() (*string, *containerClientChangeLeaseOptions, *ModifiedAccessConditions, error) {
+ generatedUuid, err := uuid.New()
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ leaseID := to.Ptr(generatedUuid.String())
+ if o == nil {
+ return leaseID, nil, nil, err
+ }
+
+ if o.ProposedLeaseID == nil {
+ o.ProposedLeaseID = leaseID
+ }
+
+ return o.ProposedLeaseID, nil, o.ModifiedAccessConditions, err
+}
+
+// ContainerChangeLeaseResponse contains the response from method ContainerLeaseClient.ChangeLease.
+type ContainerChangeLeaseResponse struct {
+ containerClientChangeLeaseResponse
+}
+
+func toContainerChangeLeaseResponse(resp containerClientChangeLeaseResponse) ContainerChangeLeaseResponse {
+ return ContainerChangeLeaseResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerReleaseLeaseOptions provides set of configurations for ReleaseLeaseContainer operation
+type ContainerReleaseLeaseOptions struct {
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *ContainerReleaseLeaseOptions) format() (*containerClientReleaseLeaseOptions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil
+ }
+
+ return nil, o.ModifiedAccessConditions
+}
+
+// ContainerReleaseLeaseResponse contains the response from method ContainerLeaseClient.ReleaseLease.
+type ContainerReleaseLeaseResponse struct {
+ containerClientReleaseLeaseResponse
+}
+
+func toContainerReleaseLeaseResponse(resp containerClientReleaseLeaseResponse) ContainerReleaseLeaseResponse {
+ return ContainerReleaseLeaseResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerRenewLeaseOptions provides set of configurations for RenewLeaseContainer operation
+type ContainerRenewLeaseOptions struct {
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *ContainerRenewLeaseOptions) format() (*containerClientRenewLeaseOptions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil
+ }
+
+ return nil, o.ModifiedAccessConditions
+}
+
+// ContainerRenewLeaseResponse contains the response from method ContainerLeaseClient.RenewLease.
+type ContainerRenewLeaseResponse struct {
+ containerClientRenewLeaseResponse
+}
+
+func toContainerRenewLeaseResponse(resp containerClientRenewLeaseResponse) ContainerRenewLeaseResponse {
+ return ContainerRenewLeaseResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_request_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_request_options.go
deleted file mode 100644
index 26448cd421f..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_request_options.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-type CreateContainerOptions struct {
- // Specifies whether data in the container may be accessed publicly and the level of access
- Access *PublicAccessType
-
- // Optional. Specifies a user-defined name-value pair associated with the blob.
- Metadata map[string]string
-
- // Optional. Specifies the encryption scope settings to set on the container.
- cpkScope *ContainerCpkScopeInfo
-}
-
-func (o *CreateContainerOptions) pointers() (*ContainerCreateOptions, *ContainerCpkScopeInfo) {
- if o == nil {
- return nil, nil
- }
-
- basicOptions := ContainerCreateOptions{
- Access: o.Access,
- Metadata: o.Metadata,
- }
-
- return &basicOptions, o.cpkScope
-}
-
-type DeleteContainerOptions struct {
- LeaseAccessConditions *LeaseAccessConditions
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *DeleteContainerOptions) pointers() (*ContainerDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil
- }
-
- return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions
-}
-
-type GetPropertiesOptionsContainer struct {
- ContainerGetPropertiesOptions *ContainerGetPropertiesOptions
- LeaseAccessConditions *LeaseAccessConditions
-}
-
-func (o *GetPropertiesOptionsContainer) pointers() (*ContainerGetPropertiesOptions, *LeaseAccessConditions) {
- if o == nil {
- return nil, nil
- }
-
- return o.ContainerGetPropertiesOptions, o.LeaseAccessConditions
-}
-
-type GetAccessPolicyOptions struct {
- ContainerGetAccessPolicyOptions *ContainerGetAccessPolicyOptions
- LeaseAccessConditions *LeaseAccessConditions
-}
-
-func (o *GetAccessPolicyOptions) pointers() (*ContainerGetAccessPolicyOptions, *LeaseAccessConditions) {
- if o == nil {
- return nil, nil
- }
-
- return o.ContainerGetAccessPolicyOptions, o.LeaseAccessConditions
-}
-
-type SetAccessPolicyOptions struct {
- // At least Access and ContainerACL must be specified
- ContainerSetAccessPolicyOptions ContainerSetAccessPolicyOptions
- AccessConditions *ContainerAccessConditions
-}
-
-func (o *SetAccessPolicyOptions) pointers() (ContainerSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
- if o == nil {
- return ContainerSetAccessPolicyOptions{}, nil, nil
- }
- mac, lac := o.AccessConditions.pointers()
- return o.ContainerSetAccessPolicyOptions, lac, mac
-}
-
-type SetMetadataContainerOptions struct {
- Metadata map[string]string
- LeaseAccessConditions *LeaseAccessConditions
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *SetMetadataContainerOptions) pointers() (*ContainerSetMetadataOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil
- }
-
- options := ContainerSetMetadataOptions{Metadata: o.Metadata}
- return &options, o.LeaseAccessConditions, o.ModifiedAccessConditions
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go
new file mode 100644
index 00000000000..c7a67abe774
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go
@@ -0,0 +1,201 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+ "context"
+ "fmt"
+)
+
+const _1MiB = 1024 * 1024
+
+// UploadOption identifies options used by the UploadBuffer and UploadFile functions.
+type UploadOption struct {
+ // BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes.
+ BlockSize int64
+
+ // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient.
+ // Note that the progress reporting is not always increasing; it can go down when retrying a request.
+ Progress func(bytesTransferred int64)
+
+ // HTTPHeaders indicates the HTTP headers to be associated with the blob.
+ HTTPHeaders *BlobHTTPHeaders
+
+ // Metadata indicates the metadata to be associated with the blob when PutBlockList is called.
+ Metadata map[string]string
+
+ // BlobAccessConditions indicates the access conditions for the block blob.
+ BlobAccessConditions *BlobAccessConditions
+
+ // AccessTier indicates the tier of blob
+ AccessTier *AccessTier
+
+ // TagsMap
+ TagsMap map[string]string
+
+ // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
+ CpkInfo *CpkInfo
+ CpkScopeInfo *CpkScopeInfo
+
+ // Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
+ Parallelism uint16
+ // Optional header, Specifies the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 *[]byte
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 *[]byte
+}
+
+func (o *UploadOption) getStageBlockOptions() *BlockBlobStageBlockOptions {
+ leaseAccessConditions, _ := o.BlobAccessConditions.format()
+ return &BlockBlobStageBlockOptions{
+ CpkInfo: o.CpkInfo,
+ CpkScopeInfo: o.CpkScopeInfo,
+ LeaseAccessConditions: leaseAccessConditions,
+ }
+}
+
+func (o *UploadOption) getUploadBlockBlobOptions() *BlockBlobUploadOptions {
+ return &BlockBlobUploadOptions{
+ TagsMap: o.TagsMap,
+ Metadata: o.Metadata,
+ Tier: o.AccessTier,
+ HTTPHeaders: o.HTTPHeaders,
+ BlobAccessConditions: o.BlobAccessConditions,
+ CpkInfo: o.CpkInfo,
+ CpkScopeInfo: o.CpkScopeInfo,
+ }
+}
+
+func (o *UploadOption) getCommitBlockListOptions() *BlockBlobCommitBlockListOptions {
+ return &BlockBlobCommitBlockListOptions{
+ BlobTagsMap: o.TagsMap,
+ Metadata: o.Metadata,
+ Tier: o.AccessTier,
+ BlobHTTPHeaders: o.HTTPHeaders,
+ CpkInfo: o.CpkInfo,
+ CpkScopeInfo: o.CpkScopeInfo,
+ }
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// UploadStreamOptions provides set of configurations for UploadStream operation
+type UploadStreamOptions struct {
+ // TransferManager provides a TransferManager that controls buffer allocation/reuse and
+ // concurrency. This overrides BufferSize and MaxBuffers if set.
+ TransferManager TransferManager
+ transferMangerNotSet bool
+ // BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB.
+ BufferSize int
+ // MaxBuffers defines the number of simultaneous uploads will be performed to upload the file.
+ MaxBuffers int
+ HTTPHeaders *BlobHTTPHeaders
+ Metadata map[string]string
+ BlobAccessConditions *BlobAccessConditions
+ AccessTier *AccessTier
+ BlobTagsMap map[string]string
+ CpkInfo *CpkInfo
+ CpkScopeInfo *CpkScopeInfo
+}
+
+func (u *UploadStreamOptions) defaults() error {
+ if u.TransferManager != nil {
+ return nil
+ }
+
+ if u.MaxBuffers == 0 {
+ u.MaxBuffers = 1
+ }
+
+ if u.BufferSize < _1MiB {
+ u.BufferSize = _1MiB
+ }
+
+ var err error
+ u.TransferManager, err = NewStaticBuffer(u.BufferSize, u.MaxBuffers)
+ if err != nil {
+ return fmt.Errorf("bug: default transfer manager could not be created: %s", err)
+ }
+ u.transferMangerNotSet = true
+ return nil
+}
+
+func (u *UploadStreamOptions) getStageBlockOptions() *BlockBlobStageBlockOptions {
+ leaseAccessConditions, _ := u.BlobAccessConditions.format()
+ return &BlockBlobStageBlockOptions{
+ CpkInfo: u.CpkInfo,
+ CpkScopeInfo: u.CpkScopeInfo,
+ LeaseAccessConditions: leaseAccessConditions,
+ }
+}
+
+func (u *UploadStreamOptions) getCommitBlockListOptions() *BlockBlobCommitBlockListOptions {
+ options := &BlockBlobCommitBlockListOptions{
+ BlobTagsMap: u.BlobTagsMap,
+ Metadata: u.Metadata,
+ Tier: u.AccessTier,
+ BlobHTTPHeaders: u.HTTPHeaders,
+ CpkInfo: u.CpkInfo,
+ CpkScopeInfo: u.CpkScopeInfo,
+ BlobAccessConditions: u.BlobAccessConditions,
+ }
+
+ return options
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// DownloadOptions identifies options used by the DownloadToBuffer and DownloadToFile functions.
+type DownloadOptions struct {
+ // BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
+ BlockSize int64
+
+ // Progress is a function that is invoked periodically as bytes are received.
+ Progress func(bytesTransferred int64)
+
+ // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
+ BlobAccessConditions *BlobAccessConditions
+
+ // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
+ CpkInfo *CpkInfo
+ CpkScopeInfo *CpkScopeInfo
+
+ // Parallelism indicates the maximum number of blocks to download in parallel (0=default)
+ Parallelism uint16
+
+ // RetryReaderOptionsPerBlock is used when downloading each block.
+ RetryReaderOptionsPerBlock RetryReaderOptions
+}
+
+func (o *DownloadOptions) getBlobPropertiesOptions() *BlobGetPropertiesOptions {
+ return &BlobGetPropertiesOptions{
+ BlobAccessConditions: o.BlobAccessConditions,
+ CpkInfo: o.CpkInfo,
+ }
+}
+
+func (o *DownloadOptions) getDownloadBlobOptions(offSet, count int64, rangeGetContentMD5 *bool) *BlobDownloadOptions {
+ return &BlobDownloadOptions{
+ BlobAccessConditions: o.BlobAccessConditions,
+ CpkInfo: o.CpkInfo,
+ CpkScopeInfo: o.CpkScopeInfo,
+ Offset: &offSet,
+ Count: &count,
+ RangeGetContentMD5: rangeGetContentMD5,
+ }
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// BatchTransferOptions identifies options used by DoBatchTransfer.
+type BatchTransferOptions struct {
+ TransferSize int64
+ ChunkSize int64
+ Parallelism uint16
+ Operation func(offset int64, chunkSize int64, ctx context.Context) error
+ OperationName string
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_lease_request_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_lease_request_options.go
deleted file mode 100644
index d9d4417f367..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_lease_request_options.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
-)
-
-type AcquireLeaseBlobOptions struct {
- // Specifies the Duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease
- // can be between 15 and 60 seconds. A lease Duration cannot be changed using renew or change.
- Duration *int32
-
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *AcquireLeaseBlobOptions) pointers() (*BlobAcquireLeaseOptions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil
- }
- return &BlobAcquireLeaseOptions{
- Duration: o.Duration,
- }, o.ModifiedAccessConditions
-}
-
-type BreakLeaseBlobOptions struct {
- // For a break operation, proposed Duration the lease should continue before it is broken, in seconds, between 0 and 60. This
- // break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease
- // is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than
- // the break period. If this header does not appear with a break operation, a fixed-Duration lease breaks after the remaining
- // lease period elapses, and an infinite lease breaks immediately.
- BreakPeriod *int32
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *BreakLeaseBlobOptions) pointers() (*BlobBreakLeaseOptions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil
- }
-
- if o.BreakPeriod != nil {
- period := leasePeriodPointer(*o.BreakPeriod)
- return &BlobBreakLeaseOptions{
- BreakPeriod: period,
- }, o.ModifiedAccessConditions
- }
-
- return nil, o.ModifiedAccessConditions
-}
-
-type ChangeLeaseBlobOptions struct {
- ProposedLeaseID *string
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *ChangeLeaseBlobOptions) pointers() (proposedLeaseI *string, modifiedAccessConditions *ModifiedAccessConditions, err error) {
- generatedUuid, err := uuid.New()
- if err != nil {
- return nil, nil, err
- }
- leaseID := to.StringPtr(generatedUuid.String())
- if o == nil {
- return leaseID, nil, nil
- }
-
- if o.ProposedLeaseID == nil {
- o.ProposedLeaseID = leaseID
- }
-
- return o.ProposedLeaseID, o.ModifiedAccessConditions, nil
-}
-
-type ReleaseLeaseBlobOptions struct {
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *ReleaseLeaseBlobOptions) pointers() (blobReleaseLeaseOptions *BlobReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil
- }
-
- return nil, o.ModifiedAccessConditions
-}
-
-type RenewLeaseBlobOptions struct {
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *RenewLeaseBlobOptions) pointers() (blobRenewLeaseOptions *BlobRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil
- }
-
- return nil, o.ModifiedAccessConditions
-}
-
-type AcquireLeaseContainerOptions struct {
- Duration *int32
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *AcquireLeaseContainerOptions) pointers() (*ContainerAcquireLeaseOptions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil
- }
- containerAcquireLeaseOptions := &ContainerAcquireLeaseOptions{
- Duration: o.Duration,
- }
-
- return containerAcquireLeaseOptions, o.ModifiedAccessConditions
-}
-
-type BreakLeaseContainerOptions struct {
- BreakPeriod *int32
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *BreakLeaseContainerOptions) pointers() (*ContainerBreakLeaseOptions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil
- }
-
- containerBreakLeaseOptions := &ContainerBreakLeaseOptions{
- BreakPeriod: o.BreakPeriod,
- }
-
- return containerBreakLeaseOptions, o.ModifiedAccessConditions
-}
-
-type ChangeLeaseContainerOptions struct {
- ProposedLeaseID *string
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *ChangeLeaseContainerOptions) pointers() (proposedLeaseID *string, modifiedAccessConditions *ModifiedAccessConditions, err error) {
- generatedUuid, err := uuid.New()
- if err != nil {
- return nil, nil, err
- }
- leaseID := to.StringPtr(generatedUuid.String())
- if o == nil {
- return leaseID, nil, err
- }
-
- if o.ProposedLeaseID == nil {
- o.ProposedLeaseID = leaseID
- }
-
- return o.ProposedLeaseID, o.ModifiedAccessConditions, err
-
-}
-
-type RenewLeaseContainerOptions struct {
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *RenewLeaseContainerOptions) pointers() (containerRenewLeaseOptions *ContainerRenewLeaseOptions,
- modifiedAccessConditions *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil
- }
-
- return nil, o.ModifiedAccessConditions
-}
-
-type ReleaseLeaseContainerOptions struct {
- ModifiedAccessConditions *ModifiedAccessConditions
-}
-
-func (o *ReleaseLeaseContainerOptions) pointers() (containerReleaseLeaseOptions *ContainerReleaseLeaseOptions,
- modifiedAccessConditions *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil
- }
-
- return nil, o.ModifiedAccessConditions
-}
-
-// LeaseBreakNaturally tells ContainerClient's or BlobClient's BreakLease method to break the lease using service semantics.
-const LeaseBreakNaturally = -1
-
-func leasePeriodPointer(period int32) *int32 {
- if period != LeaseBreakNaturally {
- return &period
- } else {
- return nil
- }
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go
new file mode 100644
index 00000000000..2be2758736a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go
@@ -0,0 +1,402 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+ "strconv"
+ "time"
+)
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+func rangeToString(offset, count int64) string {
+ return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10)
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobCreateOptions provides set of configurations for CreatePageBlob operation
+type PageBlobCreateOptions struct {
+ // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
+ // the sequence number must be between 0 and 2^63 - 1.
+ BlobSequenceNumber *int64
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsMap map[string]string
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
+ // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
+ // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
+ // See Naming and Referencing Containers, Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Optional. Indicates the tier to be set on the page blob.
+ Tier *PremiumPageBlobAccessTier
+
+ HTTPHeaders *BlobHTTPHeaders
+
+ CpkInfo *CpkInfo
+
+ CpkScopeInfo *CpkScopeInfo
+
+ BlobAccessConditions *BlobAccessConditions
+ // Specifies the date time when the blobs immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specified if a legal hold should be set on the blob.
+ LegalHold *bool
+}
+
+func (o *PageBlobCreateOptions) format() (*pageBlobClientCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil, nil, nil
+ }
+
+ options := &pageBlobClientCreateOptions{
+ BlobSequenceNumber: o.BlobSequenceNumber,
+ BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap),
+ Metadata: o.Metadata,
+ Tier: o.Tier,
+ }
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
+}
+
+// PageBlobCreateResponse contains the response from method PageBlobClient.Create.
+type PageBlobCreateResponse struct {
+ pageBlobClientCreateResponse
+}
+
+func toPageBlobCreateResponse(resp pageBlobClientCreateResponse) PageBlobCreateResponse {
+ return PageBlobCreateResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobUploadPagesOptions provides set of configurations for UploadPages operation
+type PageBlobUploadPagesOptions struct {
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ PageRange *HttpRange
+ TransactionalContentCRC64 []byte
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+
+ CpkInfo *CpkInfo
+ CpkScopeInfo *CpkScopeInfo
+ SequenceNumberAccessConditions *SequenceNumberAccessConditions
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *PageBlobUploadPagesOptions) format() (*pageBlobClientUploadPagesOptions, *LeaseAccessConditions,
+ *CpkInfo, *CpkScopeInfo, *SequenceNumberAccessConditions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil, nil, nil
+ }
+
+ options := &pageBlobClientUploadPagesOptions{
+ TransactionalContentCRC64: o.TransactionalContentCRC64,
+ TransactionalContentMD5: o.TransactionalContentMD5,
+ }
+
+ if o.PageRange != nil {
+ options.Range = o.PageRange.format()
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return options, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions
+}
+
+// PageBlobUploadPagesResponse contains the response from method PageBlobClient.UploadPages.
+type PageBlobUploadPagesResponse struct {
+ pageBlobClientUploadPagesResponse
+}
+
+func toPageBlobUploadPagesResponse(resp pageBlobClientUploadPagesResponse) PageBlobUploadPagesResponse {
+ return PageBlobUploadPagesResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobUploadPagesFromURLOptions provides set of configurations for UploadPagesFromURL operation
+type PageBlobUploadPagesFromURLOptions struct {
+ // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string
+ // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+ SourceContentMD5 []byte
+ // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+ SourceContentCRC64 []byte
+
+ CpkInfo *CpkInfo
+
+ CpkScopeInfo *CpkScopeInfo
+
+ SequenceNumberAccessConditions *SequenceNumberAccessConditions
+
+ SourceModifiedAccessConditions *SourceModifiedAccessConditions
+
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *PageBlobUploadPagesFromURLOptions) format() (*pageBlobClientUploadPagesFromURLOptions, *CpkInfo, *CpkScopeInfo,
+ *LeaseAccessConditions, *SequenceNumberAccessConditions, *ModifiedAccessConditions, *SourceModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil, nil, nil, nil
+ }
+
+ options := &pageBlobClientUploadPagesFromURLOptions{
+ SourceContentMD5: o.SourceContentMD5,
+ SourceContentcrc64: o.SourceContentCRC64,
+ CopySourceAuthorization: o.CopySourceAuthorization,
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions
+}
+
+// PageBlobUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL
+type PageBlobUploadPagesFromURLResponse struct {
+ pageBlobClientUploadPagesFromURLResponse
+}
+
+func toPageBlobUploadPagesFromURLResponse(resp pageBlobClientUploadPagesFromURLResponse) PageBlobUploadPagesFromURLResponse {
+ return PageBlobUploadPagesFromURLResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobClearPagesOptions provides set of configurations for PageBlobClient.ClearPages operation
+type PageBlobClearPagesOptions struct {
+ CpkInfo *CpkInfo
+ CpkScopeInfo *CpkScopeInfo
+ SequenceNumberAccessConditions *SequenceNumberAccessConditions
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *PageBlobClearPagesOptions) format() (*LeaseAccessConditions, *CpkInfo,
+ *CpkScopeInfo, *SequenceNumberAccessConditions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil, nil
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions
+}
+
+// PageBlobClearPagesResponse contains the response from method PageBlobClient.ClearPages
+type PageBlobClearPagesResponse struct {
+ pageBlobClientClearPagesResponse
+}
+
+func toPageBlobClearPagesResponse(resp pageBlobClientClearPagesResponse) PageBlobClearPagesResponse {
+ return PageBlobClearPagesResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobGetPageRangesOptions provides set of configurations for GetPageRanges operation
+type PageBlobGetPageRangesOptions struct {
+ Marker *string
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ MaxResults *int32
+ // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot
+ // of the target blob. The response will only contain pages that were changed
+ // between the target blob and its previous snapshot.
+ PrevSnapshotURL *string
+ // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response
+ // will contain only pages that were changed between target blob and previous
+ // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot
+ // specified by prevsnapshot is the older of the two. Note that incremental
+ // snapshots are currently supported only for blobs created on or after January 1, 2016.
+ PrevSnapshot *string
+ // Optional, you can specify whether a particular range of the blob is read
+ PageRange *HttpRange
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *PageBlobGetPageRangesOptions) format() (*pageBlobClientGetPageRangesOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return &pageBlobClientGetPageRangesOptions{
+ Marker: o.Marker,
+ Maxresults: o.MaxResults,
+ Range: o.PageRange.format(),
+ Snapshot: o.Snapshot,
+ }, leaseAccessConditions, modifiedAccessConditions
+}
+
+// PageBlobGetPageRangesPager provides operations for iterating over paged responses
+type PageBlobGetPageRangesPager struct {
+ *pageBlobClientGetPageRangesPager
+}
+
+func toPageBlobGetPageRangesPager(resp *pageBlobClientGetPageRangesPager) *PageBlobGetPageRangesPager {
+ return &PageBlobGetPageRangesPager{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobGetPageRangesDiffOptions provides set of configurations for PageBlobClient.GetPageRangesDiff operation
+type PageBlobGetPageRangesDiffOptions struct {
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ MaxResults *int32
+ // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot
+ // of the target blob. The response will only contain pages that were changed
+ // between the target blob and its previous snapshot.
+ PrevSnapshotURL *string
+ // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response
+ // will contain only pages that were changed between target blob and previous
+ // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot
+ // specified by prevsnapshot is the older of the two. Note that incremental
+ // snapshots are currently supported only for blobs created on or after January 1, 2016.
+ PrevSnapshot *string
+ // Optional, you can specify whether a particular range of the blob is read
+ PageRange *HttpRange
+
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *PageBlobGetPageRangesDiffOptions) format() (*pageBlobClientGetPageRangesDiffOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return &pageBlobClientGetPageRangesDiffOptions{
+ Marker: o.Marker,
+ Maxresults: o.MaxResults,
+ PrevSnapshotURL: o.PrevSnapshotURL,
+ Prevsnapshot: o.PrevSnapshot,
+ Range: o.PageRange.format(),
+ Snapshot: o.Snapshot,
+ }, leaseAccessConditions, modifiedAccessConditions
+
+}
+
+// PageBlobGetPageRangesDiffPager provides operations for iterating over paged responses
+type PageBlobGetPageRangesDiffPager struct {
+ *pageBlobClientGetPageRangesDiffPager
+}
+
+func toPageBlobGetPageRangesDiffPager(resp *pageBlobClientGetPageRangesDiffPager) *PageBlobGetPageRangesDiffPager {
+ return &PageBlobGetPageRangesDiffPager{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobResizeOptions provides set of configurations for PageBlobClient.Resize operation
+type PageBlobResizeOptions struct {
+ CpkInfo *CpkInfo
+ CpkScopeInfo *CpkScopeInfo
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *PageBlobResizeOptions) format() (*pageBlobClientResizeOptions, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil, nil
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return nil, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
+}
+
+// PageBlobResizeResponse contains the response from method PageBlobClient.Resize
+type PageBlobResizeResponse struct {
+ pageBlobClientResizeResponse
+}
+
+func toPageBlobResizeResponse(resp pageBlobClientResizeResponse) PageBlobResizeResponse {
+ return PageBlobResizeResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobUpdateSequenceNumberOptions provides set of configurations for PageBlobClient.UpdateSequenceNumber operation
+type PageBlobUpdateSequenceNumberOptions struct {
+ ActionType *SequenceNumberActionType
+
+ BlobSequenceNumber *int64
+
+ BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *PageBlobUpdateSequenceNumberOptions) format() (*SequenceNumberActionType, *pageBlobClientUpdateSequenceNumberOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil, nil, nil
+ }
+
+ options := &pageBlobClientUpdateSequenceNumberOptions{
+ BlobSequenceNumber: o.BlobSequenceNumber,
+ }
+
+ if *o.ActionType == SequenceNumberActionTypeIncrement {
+ options.BlobSequenceNumber = nil
+ }
+
+ leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+ return o.ActionType, options, leaseAccessConditions, modifiedAccessConditions
+}
+
+// PageBlobUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber
+type PageBlobUpdateSequenceNumberResponse struct {
+ pageBlobClientUpdateSequenceNumberResponse
+}
+
+func toPageBlobUpdateSequenceNumberResponse(resp pageBlobClientUpdateSequenceNumberResponse) PageBlobUpdateSequenceNumberResponse {
+ return PageBlobUpdateSequenceNumberResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobCopyIncrementalOptions provides set of configurations for PageBlobClient.StartCopyIncremental operation
+type PageBlobCopyIncrementalOptions struct {
+ ModifiedAccessConditions *ModifiedAccessConditions
+}
+
+func (o *PageBlobCopyIncrementalOptions) format() (*pageBlobClientCopyIncrementalOptions, *ModifiedAccessConditions) {
+ if o == nil {
+ return nil, nil
+ }
+
+ return nil, o.ModifiedAccessConditions
+}
+
+// PageBlobCopyIncrementalResponse contains the response from method PageBlobClient.StartCopyIncremental
+type PageBlobCopyIncrementalResponse struct {
+ pageBlobClientCopyIncrementalResponse
+}
+
+func toPageBlobCopyIncrementalResponse(resp pageBlobClientCopyIncrementalResponse) PageBlobCopyIncrementalResponse {
+ return PageBlobCopyIncrementalResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_request_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_request_options.go
deleted file mode 100644
index b587fe47605..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_request_options.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-import (
- "strconv"
-)
-
-func rangeToString(offset, count int64) string {
- return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10)
-}
-
-type CreatePageBlobOptions struct {
- // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
- // the sequence number must be between 0 and 2^63 - 1.
- BlobSequenceNumber *int64
- // Optional. Used to set blob tags in various blob operations.
- TagsMap map[string]string
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
- // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
- // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
- // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
- // See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Optional. Indicates the tier to be set on the page blob.
- Tier *PremiumPageBlobAccessTier
-
- HTTPHeaders *BlobHTTPHeaders
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *CreatePageBlobOptions) pointers() (*PageBlobCreateOptions, *BlobHTTPHeaders, *CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil, nil, nil
- }
-
- options := &PageBlobCreateOptions{
- BlobSequenceNumber: o.BlobSequenceNumber,
- BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap),
- Metadata: o.Metadata,
- Tier: o.Tier,
- }
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return options, o.HTTPHeaders, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, modifiedAccessConditions
-}
-
-type UploadPagesOptions struct {
- // Specify the transactional crc64 for the body, to be validated by the service.
- PageRange *HttpRange
- TransactionalContentCRC64 []byte
- // Specify the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 []byte
-
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
- SequenceNumberAccessConditions *SequenceNumberAccessConditions
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *UploadPagesOptions) pointers() (*PageBlobUploadPagesOptions, *CpkInfo, *CpkScopeInfo, *SequenceNumberAccessConditions, *LeaseAccessConditions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil, nil, nil
- }
-
- options := &PageBlobUploadPagesOptions{
- TransactionalContentCRC64: o.TransactionalContentCRC64,
- TransactionalContentMD5: o.TransactionalContentMD5,
- }
-
- if o.PageRange != nil {
- options.Range = o.PageRange.pointers()
- }
-
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return options, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, leaseAccessConditions, modifiedAccessConditions
-}
-
-type UploadPagesFromURLOptions struct {
- // Specify the md5 calculated for the range of bytes that must be read from the copy source.
- SourceContentMD5 []byte
- // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
- SourceContentcrc64 []byte
-
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
- SequenceNumberAccessConditions *SequenceNumberAccessConditions
- SourceModifiedAccessConditions *SourceModifiedAccessConditions
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *UploadPagesFromURLOptions) pointers() (*PageBlobUploadPagesFromURLOptions, *CpkInfo, *CpkScopeInfo, *SequenceNumberAccessConditions, *SourceModifiedAccessConditions, *LeaseAccessConditions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil, nil, nil, nil
- }
-
- options := &PageBlobUploadPagesFromURLOptions{
- SourceContentMD5: o.SourceContentMD5,
- SourceContentcrc64: o.SourceContentcrc64,
- }
-
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return options, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, o.SourceModifiedAccessConditions, leaseAccessConditions, modifiedAccessConditions
-}
-
-type ClearPagesOptions struct {
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
- SequenceNumberAccessConditions *SequenceNumberAccessConditions
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *ClearPagesOptions) pointers() (*CpkInfo, *CpkScopeInfo, *SequenceNumberAccessConditions, *LeaseAccessConditions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil, nil
- }
-
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, leaseAccessConditions, modifiedAccessConditions
-}
-
-type GetPageRangesOptions struct {
- Snapshot *string
-
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *GetPageRangesOptions) pointers() (*string, *LeaseAccessConditions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil
- }
-
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return o.Snapshot, leaseAccessConditions, modifiedAccessConditions
-}
-
-type ResizePageBlobOptions struct {
- CpkInfo *CpkInfo
- CpkScopeInfo *CpkScopeInfo
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *ResizePageBlobOptions) pointers() (*CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil
- }
-
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, modifiedAccessConditions
-}
-
-type UpdateSequenceNumberPageBlob struct {
- ActionType *SequenceNumberActionType
- BlobSequenceNumber *int64
-
- BlobAccessConditions *BlobAccessConditions
-}
-
-func (o *UpdateSequenceNumberPageBlob) pointers() (*PageBlobUpdateSequenceNumberOptions, *SequenceNumberActionType, *LeaseAccessConditions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil, nil, nil
- }
-
- options := &PageBlobUpdateSequenceNumberOptions{
- BlobSequenceNumber: o.BlobSequenceNumber,
- }
-
- if *o.ActionType == SequenceNumberActionTypeIncrement {
- options.BlobSequenceNumber = nil
- }
-
- leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.pointers()
- return options, o.ActionType, leaseAccessConditions, modifiedAccessConditions
-}
-
-type CopyIncrementalPageBlobOptions struct {
- ModifiedAccessConditions *ModifiedAccessConditions
-
- RequestID *string
-
- Timeout *int32
-}
-
-func (o *CopyIncrementalPageBlobOptions) pointers() (*PageBlobCopyIncrementalOptions, *ModifiedAccessConditions) {
- if o == nil {
- return nil, nil
- }
-
- options := PageBlobCopyIncrementalOptions{
- RequestID: o.RequestID,
- Timeout: o.Timeout,
- }
-
- return &options, o.ModifiedAccessConditions
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zu_serialize_and_desearilize.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go
similarity index 63%
rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zu_serialize_and_desearilize.go
rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go
index 988ef6960ec..3cf85ca43b1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zu_serialize_and_desearilize.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go
@@ -1,9 +1,40 @@
+//go:build go1.18
+// +build go1.18
+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
-import "strings"
+import (
+ "net/url"
+ "strings"
+)
+
+func serializeBlobTagsToStrPtr(tagsMap map[string]string) *string {
+ if tagsMap == nil {
+ return nil
+ }
+ tags := make([]string, 0)
+ for key, val := range tagsMap {
+ tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val))
+ }
+ //tags = tags[:len(tags)-1]
+ blobTagsString := strings.Join(tags, "&")
+ return &blobTagsString
+}
+
+func serializeBlobTags(tagsMap map[string]string) *BlobTags {
+ if tagsMap == nil {
+ return nil
+ }
+ blobTagSet := make([]*BlobTag, 0)
+ for key, val := range tagsMap {
+ newKey, newVal := key, val
+ blobTagSet = append(blobTagSet, &BlobTag{Key: &newKey, Value: &newVal})
+ }
+ return &BlobTags{BlobTagSet: blobTagSet}
+}
func deserializeORSPolicies(policies map[string]string) (objectReplicationPolicies []ObjectReplicationPolicy) {
if policies == nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go
new file mode 100644
index 00000000000..747a94ee245
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go
@@ -0,0 +1,226 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ServiceGetAccountInfoOptions provides set of options for ServiceClient.GetAccountInfo
+type ServiceGetAccountInfoOptions struct {
+ // placeholder for future options
+}
+
+func (o *ServiceGetAccountInfoOptions) format() *serviceClientGetAccountInfoOptions {
+ return nil
+}
+
+// ServiceGetAccountInfoResponse contains the response from ServiceClient.GetAccountInfo
+type ServiceGetAccountInfoResponse struct {
+ serviceClientGetAccountInfoResponse
+}
+
+func toServiceGetAccountInfoResponse(resp serviceClientGetAccountInfoResponse) ServiceGetAccountInfoResponse {
+ return ServiceGetAccountInfoResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ListContainersDetail indicates what additional information the service should return with each container.
+type ListContainersDetail struct {
+ // Tells the service whether to return metadata for each container.
+ Metadata bool
+
+ // Tells the service whether to return soft-deleted containers.
+ Deleted bool
+}
+
+	// format produces the `Include` query parameter's value.
+func (o *ListContainersDetail) format() []ListContainersIncludeType {
+ if !o.Metadata && !o.Deleted {
+ return nil
+ }
+
+ items := make([]ListContainersIncludeType, 0, 2)
+ // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
+ if o.Deleted {
+ items = append(items, ListContainersIncludeTypeDeleted)
+ }
+ if o.Metadata {
+ items = append(items, ListContainersIncludeTypeMetadata)
+ }
+ return items
+}
+
+// ListContainersOptions provides set of configurations for ListContainers operation
+type ListContainersOptions struct {
+ Include ListContainersDetail
+
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing operation did not return all containers
+ // remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in
+ // a subsequent call to request the next page of list items. The marker value is opaque to the client.
+ Marker *string
+
+ // Specifies the maximum number of containers to return. If the request does not specify max results, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary,
+ // then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible
+ // that the service will return fewer results than specified by max results, or than the default of 5000.
+ MaxResults *int32
+
+ // Filters the results to return only containers whose name begins with the specified prefix.
+ Prefix *string
+}
+
+func (o *ListContainersOptions) format() *serviceClientListContainersSegmentOptions {
+ if o == nil {
+ return nil
+ }
+
+ return &serviceClientListContainersSegmentOptions{
+ Include: o.Include.format(),
+ Marker: o.Marker,
+ Maxresults: o.MaxResults,
+ Prefix: o.Prefix,
+ }
+}
+
+// ServiceListContainersSegmentPager provides operations for iterating over paged responses.
+type ServiceListContainersSegmentPager struct {
+ serviceClientListContainersSegmentPager
+}
+
+func toServiceListContainersSegmentPager(resp serviceClientListContainersSegmentPager) *ServiceListContainersSegmentPager {
+ return &ServiceListContainersSegmentPager{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ServiceGetPropertiesOptions provides set of options for ServiceClient.GetProperties
+type ServiceGetPropertiesOptions struct {
+ // placeholder for future options
+}
+
+func (o *ServiceGetPropertiesOptions) format() *serviceClientGetPropertiesOptions {
+ return nil
+}
+
+// ServiceGetPropertiesResponse contains the response from ServiceClient.GetProperties
+type ServiceGetPropertiesResponse struct {
+ serviceClientGetPropertiesResponse
+}
+
+func toServiceGetPropertiesResponse(resp serviceClientGetPropertiesResponse) ServiceGetPropertiesResponse {
+ return ServiceGetPropertiesResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ServiceSetPropertiesOptions provides set of options for ServiceClient.SetProperties
+type ServiceSetPropertiesOptions struct {
+ // The set of CORS rules.
+ Cors []*CorsRule
+
+ // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible
+ // values include version 2008-10-27 and all more recent versions
+ DefaultServiceVersion *string
+
+ // the retention policy which determines how long the associated data should persist
+ DeleteRetentionPolicy *RetentionPolicy
+
+ // a summary of request statistics grouped by API in hour or minute aggregates for blobs
+ HourMetrics *Metrics
+
+ // Azure Analytics Logging settings.
+ Logging *Logging
+
+ // a summary of request statistics grouped by API in hour or minute aggregates for blobs
+ MinuteMetrics *Metrics
+
+ // The properties that enable an account to host a static website
+ StaticWebsite *StaticWebsite
+}
+
+func (o *ServiceSetPropertiesOptions) format() (StorageServiceProperties, *serviceClientSetPropertiesOptions) {
+ if o == nil {
+ return StorageServiceProperties{}, nil
+ }
+
+ return StorageServiceProperties{
+ Cors: o.Cors,
+ DefaultServiceVersion: o.DefaultServiceVersion,
+ DeleteRetentionPolicy: o.DeleteRetentionPolicy,
+ HourMetrics: o.HourMetrics,
+ Logging: o.Logging,
+ MinuteMetrics: o.MinuteMetrics,
+ StaticWebsite: o.StaticWebsite,
+ }, nil
+}
+
+// ServiceSetPropertiesResponse contains the response from ServiceClient.SetProperties
+type ServiceSetPropertiesResponse struct {
+ serviceClientSetPropertiesResponse
+}
+
+func toServiceSetPropertiesResponse(resp serviceClientSetPropertiesResponse) ServiceSetPropertiesResponse {
+ return ServiceSetPropertiesResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ServiceGetStatisticsOptions provides set of options for ServiceClient.GetStatistics
+type ServiceGetStatisticsOptions struct {
+ // placeholder for future options
+}
+
+func (o *ServiceGetStatisticsOptions) format() *serviceClientGetStatisticsOptions {
+ return nil
+}
+
+// ServiceGetStatisticsResponse contains the response from ServiceClient.GetStatistics.
+type ServiceGetStatisticsResponse struct {
+ serviceClientGetStatisticsResponse
+}
+
+func toServiceGetStatisticsResponse(resp serviceClientGetStatisticsResponse) ServiceGetStatisticsResponse {
+ return ServiceGetStatisticsResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ServiceFilterBlobsOptions provides set of configurations for ServiceClient.FindBlobsByTags
+type ServiceFilterBlobsOptions struct {
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker
+ // value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value
+ // can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server
+ // will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for
+ // retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or
+ // than the default of 5000.
+ MaxResults *int32
+	// Filters the results to return only blobs whose tags match the specified expression.
+ Where *string
+}
+
+func (o *ServiceFilterBlobsOptions) pointer() *serviceClientFilterBlobsOptions {
+ if o == nil {
+ return nil
+ }
+ return &serviceClientFilterBlobsOptions{
+ Marker: o.Marker,
+ Maxresults: o.MaxResults,
+ Where: o.Where,
+ }
+}
+
+// ServiceFilterBlobsResponse contains the response from ServiceClient.FindBlobsByTags
+type ServiceFilterBlobsResponse struct {
+ serviceClientFilterBlobsResponse
+}
+
+func toServiceFilterBlobsResponse(resp serviceClientFilterBlobsResponse) ServiceFilterBlobsResponse {
+ return ServiceFilterBlobsResponse{resp}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_request_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_request_options.go
deleted file mode 100644
index 79376a8d49e..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_request_options.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azblob
-
-type ListContainersOptions struct {
- Include ListContainersDetail
-
- // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
- // operation returns the NextMarker value within the response body if the listing operation did not return all containers
- // remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in
- // a subsequent call to request the next page of list items. The marker value is opaque to the client.
- Marker *string
-
- // Specifies the maximum number of containers to return. If the request does not specify max results, or specifies a value
- // greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary,
- // then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible
- // that the service will return fewer results than specified by max results, or than the default of 5000.
- MaxResults *int32
-
- // Filters the results to return only containers whose name begins with the specified prefix.
- Prefix *string
-}
-
-func (o *ListContainersOptions) pointers() *ServiceListContainersSegmentOptions {
- if o == nil {
- return nil
- }
-
- return &ServiceListContainersSegmentOptions{
- Include: o.Include.pointers(),
- Marker: o.Marker,
- Maxresults: o.MaxResults,
- Prefix: o.Prefix,
- }
-}
-
-// ListContainersDetail indicates what additional information the service should return with each container.
-type ListContainersDetail struct {
- // Tells the service whether to return metadata for each container.
- Metadata bool
-
- // Tells the service whether to return soft-deleted containers.
- Deleted bool
-}
-
-// string produces the Include query parameter's value.
-func (o *ListContainersDetail) pointers() []ListContainersIncludeType {
- if !o.Metadata && !o.Deleted {
- return nil
- }
-
- items := make([]ListContainersIncludeType, 0, 2)
- // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
- if o.Deleted {
- items = append(items, ListContainersIncludeTypeDeleted)
- }
- if o.Metadata {
- items = append(items, ListContainersIncludeTypeMetadata)
- }
- return items
-}
-
-type ServiceFilterBlobsByTagsOptions struct {
- // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker
- // value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value
- // can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client.
- Marker *string
- // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server
- // will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for
- // retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or
- // than the default of 5000.
- Maxresults *int32
- // Filters the results to return only to return only blobs whose tags match the specified expression.
- Where *string
-}
-
-func (o *ServiceFilterBlobsByTagsOptions) pointer() *ServiceFilterBlobsOptions {
- if o == nil {
- return nil
- }
- return &ServiceFilterBlobsOptions{
- Marker: o.Marker,
- Maxresults: o.Maxresults,
- Where: o.Where,
- }
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go
index 6a2b7ace598..ca5aac8cd74 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
@@ -11,56 +11,77 @@ package azblob
import (
"context"
"encoding/base64"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"io"
"net/http"
"strconv"
"time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
type appendBlobClient struct {
- con *connection
+ endpoint string
+ pl runtime.Pipeline
}
-// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append Block operation is permitted only
-// if the blob was created with x-ms-blob-type set to
+// newAppendBlobClient creates a new instance of appendBlobClient with the specified values.
+// endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
+// pl - the pipeline used for sending requests and handling responses.
+func newAppendBlobClient(endpoint string, pl runtime.Pipeline) *appendBlobClient {
+ client := &appendBlobClient{
+ endpoint: endpoint,
+ pl: pl,
+ }
+ return client
+}
+
+// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append
+// Block operation is permitted only if the blob was created with x-ms-blob-type set to
// AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
-// If the operation fails it returns the *StorageError error type.
-func (client *appendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, appendBlobAppendBlockOptions *AppendBlobAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobAppendBlockResponse, error) {
- req, err := client.appendBlockCreateRequest(ctx, contentLength, body, appendBlobAppendBlockOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// contentLength - The length of the request.
+// body - Initial data
+// appendBlobClientAppendBlockOptions - appendBlobClientAppendBlockOptions contains the optional parameters for the appendBlobClient.AppendBlock
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock
+// method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *appendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, appendBlobClientAppendBlockOptions *appendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (appendBlobClientAppendBlockResponse, error) {
+ req, err := client.appendBlockCreateRequest(ctx, contentLength, body, appendBlobClientAppendBlockOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
if err != nil {
- return AppendBlobAppendBlockResponse{}, err
+ return appendBlobClientAppendBlockResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return AppendBlobAppendBlockResponse{}, err
+ return appendBlobClientAppendBlockResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return AppendBlobAppendBlockResponse{}, runtime.NewResponseError(resp)
+ return appendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp)
}
return client.appendBlockHandleResponse(resp)
}
// appendBlockCreateRequest creates the AppendBlock request.
-func (client *appendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, appendBlobAppendBlockOptions *AppendBlobAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *appendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, appendBlobClientAppendBlockOptions *appendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "appendblock")
- if appendBlobAppendBlockOptions != nil && appendBlobAppendBlockOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobAppendBlockOptions.Timeout), 10))
+ if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientAppendBlockOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
- if appendBlobAppendBlockOptions != nil && appendBlobAppendBlockOptions.TransactionalContentMD5 != nil {
- req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(appendBlobAppendBlockOptions.TransactionalContentMD5))
+ if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.TransactionalContentMD5 != nil {
+ req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockOptions.TransactionalContentMD5))
}
- if appendBlobAppendBlockOptions != nil && appendBlobAppendBlockOptions.TransactionalContentCRC64 != nil {
- req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(appendBlobAppendBlockOptions.TransactionalContentCRC64))
+ if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.TransactionalContentCRC64 != nil {
+ req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockOptions.TransactionalContentCRC64))
}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
@@ -78,7 +99,7 @@ func (client *appendBlobClient) appendBlockCreateRequest(ctx context.Context, co
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
@@ -98,38 +119,38 @@ func (client *appendBlobClient) appendBlockCreateRequest(ctx context.Context, co
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if appendBlobAppendBlockOptions != nil && appendBlobAppendBlockOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *appendBlobAppendBlockOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientAppendBlockOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, req.SetBody(body, "application/octet-stream")
}
// appendBlockHandleResponse handles the AppendBlock response.
-func (client *appendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobAppendBlockResponse, error) {
- result := AppendBlobAppendBlockResponse{RawResponse: resp}
+func (client *appendBlobClient) appendBlockHandleResponse(resp *http.Response) (appendBlobClientAppendBlockResponse, error) {
+ result := appendBlobClientAppendBlockResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return AppendBlobAppendBlockResponse{}, err
+ return appendBlobClientAppendBlockResponse{}, err
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return AppendBlobAppendBlockResponse{}, err
+ return appendBlobClientAppendBlockResponse{}, err
}
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return AppendBlobAppendBlockResponse{}, err
+ return appendBlobClientAppendBlockResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
}
@@ -145,7 +166,7 @@ func (client *appendBlobClient) appendBlockHandleResponse(resp *http.Response) (
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return AppendBlobAppendBlockResponse{}, err
+ return appendBlobClientAppendBlockResponse{}, err
}
result.Date = &date
}
@@ -156,14 +177,14 @@ func (client *appendBlobClient) appendBlockHandleResponse(resp *http.Response) (
blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
blobCommittedBlockCount := int32(blobCommittedBlockCount32)
if err != nil {
- return AppendBlobAppendBlockResponse{}, err
+ return appendBlobClientAppendBlockResponse{}, err
}
result.BlobCommittedBlockCount = &blobCommittedBlockCount
}
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return AppendBlobAppendBlockResponse{}, err
+ return appendBlobClientAppendBlockResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
@@ -176,50 +197,62 @@ func (client *appendBlobClient) appendBlockHandleResponse(resp *http.Response) (
return result, nil
}
-// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where the contents are read from a
-// source url. The Append Block operation is permitted only if the blob was
+// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where
+// the contents are read from a source url. The Append Block operation is permitted only if the blob was
// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
-// If the operation fails it returns the *StorageError error type.
-func (client *appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, appendBlobAppendBlockFromURLOptions *AppendBlobAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobAppendBlockFromURLResponse, error) {
- req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, appendBlobAppendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// sourceURL - Specify a URL to the copy source.
+// contentLength - The length of the request.
+// appendBlobClientAppendBlockFromURLOptions - appendBlobClientAppendBlockFromURLOptions contains the optional parameters
+// for the appendBlobClient.AppendBlockFromURL method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL
+// method.
+func (client *appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, appendBlobClientAppendBlockFromURLOptions *appendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (appendBlobClientAppendBlockFromURLResponse, error) {
+ req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, appendBlobClientAppendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
if err != nil {
- return AppendBlobAppendBlockFromURLResponse{}, err
+ return appendBlobClientAppendBlockFromURLResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return AppendBlobAppendBlockFromURLResponse{}, err
+ return appendBlobClientAppendBlockFromURLResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return AppendBlobAppendBlockFromURLResponse{}, runtime.NewResponseError(resp)
+ return appendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp)
}
return client.appendBlockFromURLHandleResponse(resp)
}
// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request.
-func (client *appendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, appendBlobAppendBlockFromURLOptions *AppendBlobAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *appendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, appendBlobClientAppendBlockFromURLOptions *appendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "appendblock")
- if appendBlobAppendBlockFromURLOptions != nil && appendBlobAppendBlockFromURLOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobAppendBlockFromURLOptions.Timeout), 10))
+ if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientAppendBlockFromURLOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-copy-source", sourceURL)
- if appendBlobAppendBlockFromURLOptions != nil && appendBlobAppendBlockFromURLOptions.SourceRange != nil {
- req.Raw().Header.Set("x-ms-source-range", *appendBlobAppendBlockFromURLOptions.SourceRange)
+ if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceRange != nil {
+ req.Raw().Header.Set("x-ms-source-range", *appendBlobClientAppendBlockFromURLOptions.SourceRange)
}
- if appendBlobAppendBlockFromURLOptions != nil && appendBlobAppendBlockFromURLOptions.SourceContentMD5 != nil {
- req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(appendBlobAppendBlockFromURLOptions.SourceContentMD5))
+ if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceContentMD5 != nil {
+ req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.SourceContentMD5))
}
- if appendBlobAppendBlockFromURLOptions != nil && appendBlobAppendBlockFromURLOptions.SourceContentcrc64 != nil {
- req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(appendBlobAppendBlockFromURLOptions.SourceContentcrc64))
+ if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceContentcrc64 != nil {
+ req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.SourceContentcrc64))
}
req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
- if appendBlobAppendBlockFromURLOptions != nil && appendBlobAppendBlockFromURLOptions.TransactionalContentMD5 != nil {
- req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(appendBlobAppendBlockFromURLOptions.TransactionalContentMD5))
+ if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.TransactionalContentMD5 != nil {
+ req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.TransactionalContentMD5))
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
@@ -228,7 +261,7 @@ func (client *appendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
@@ -269,38 +302,41 @@ func (client *appendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if appendBlobAppendBlockFromURLOptions != nil && appendBlobAppendBlockFromURLOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *appendBlobAppendBlockFromURLOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientAppendBlockFromURLOptions.RequestID)
+ }
+ if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.CopySourceAuthorization != nil {
+ req.Raw().Header.Set("x-ms-copy-source-authorization", *appendBlobClientAppendBlockFromURLOptions.CopySourceAuthorization)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response.
-func (client *appendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobAppendBlockFromURLResponse, error) {
- result := AppendBlobAppendBlockFromURLResponse{RawResponse: resp}
+func (client *appendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (appendBlobClientAppendBlockFromURLResponse, error) {
+ result := appendBlobClientAppendBlockFromURLResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return AppendBlobAppendBlockFromURLResponse{}, err
+ return appendBlobClientAppendBlockFromURLResponse{}, err
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return AppendBlobAppendBlockFromURLResponse{}, err
+ return appendBlobClientAppendBlockFromURLResponse{}, err
}
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return AppendBlobAppendBlockFromURLResponse{}, err
+ return appendBlobClientAppendBlockFromURLResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
}
@@ -313,7 +349,7 @@ func (client *appendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return AppendBlobAppendBlockFromURLResponse{}, err
+ return appendBlobClientAppendBlockFromURLResponse{}, err
}
result.Date = &date
}
@@ -324,7 +360,7 @@ func (client *appendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp
blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
blobCommittedBlockCount := int32(blobCommittedBlockCount32)
if err != nil {
- return AppendBlobAppendBlockFromURLResponse{}, err
+ return appendBlobClientAppendBlockFromURLResponse{}, err
}
result.BlobCommittedBlockCount = &blobCommittedBlockCount
}
@@ -337,7 +373,7 @@ func (client *appendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return AppendBlobAppendBlockFromURLResponse{}, err
+ return appendBlobClientAppendBlockFromURLResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
@@ -345,31 +381,39 @@ func (client *appendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp
}
// Create - The Create Append Blob operation creates a new append blob.
-// If the operation fails it returns the *StorageError error type.
-func (client *appendBlobClient) Create(ctx context.Context, contentLength int64, appendBlobCreateOptions *AppendBlobCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobCreateResponse, error) {
- req, err := client.createCreateRequest(ctx, contentLength, appendBlobCreateOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// contentLength - The length of the request.
+// appendBlobClientCreateOptions - appendBlobClientCreateOptions contains the optional parameters for the appendBlobClient.Create
+// method.
+// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *appendBlobClient) Create(ctx context.Context, contentLength int64, appendBlobClientCreateOptions *appendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (appendBlobClientCreateResponse, error) {
+ req, err := client.createCreateRequest(ctx, contentLength, appendBlobClientCreateOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
if err != nil {
- return AppendBlobCreateResponse{}, err
+ return appendBlobClientCreateResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return AppendBlobCreateResponse{}, err
+ return appendBlobClientCreateResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return AppendBlobCreateResponse{}, runtime.NewResponseError(resp)
+ return appendBlobClientCreateResponse{}, runtime.NewResponseError(resp)
}
return client.createHandleResponse(resp)
}
// createCreateRequest creates the Create request.
-func (client *appendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, appendBlobCreateOptions *AppendBlobCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *appendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, appendBlobClientCreateOptions *appendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- if appendBlobCreateOptions != nil && appendBlobCreateOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobCreateOptions.Timeout), 10))
+ if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientCreateOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-blob-type", "AppendBlob")
@@ -389,8 +433,8 @@ func (client *appendBlobClient) createCreateRequest(ctx context.Context, content
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl)
}
- if appendBlobCreateOptions != nil && appendBlobCreateOptions.Metadata != nil {
- for k, v := range appendBlobCreateOptions.Metadata {
+ if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.Metadata != nil {
+ for k, v := range appendBlobClientCreateOptions.Metadata {
req.Raw().Header.Set("x-ms-meta-"+k, v)
}
}
@@ -407,7 +451,7 @@ func (client *appendBlobClient) createCreateRequest(ctx context.Context, content
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
@@ -427,34 +471,43 @@ func (client *appendBlobClient) createCreateRequest(ctx context.Context, content
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if appendBlobCreateOptions != nil && appendBlobCreateOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *appendBlobCreateOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientCreateOptions.RequestID)
+ }
+ if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.BlobTagsString != nil {
+ req.Raw().Header.Set("x-ms-tags", *appendBlobClientCreateOptions.BlobTagsString)
+ }
+ if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.ImmutabilityPolicyExpiry != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-until-date", appendBlobClientCreateOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123))
+ }
+ if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.ImmutabilityPolicyMode != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*appendBlobClientCreateOptions.ImmutabilityPolicyMode))
}
- if appendBlobCreateOptions != nil && appendBlobCreateOptions.BlobTagsString != nil {
- req.Raw().Header.Set("x-ms-tags", *appendBlobCreateOptions.BlobTagsString)
+ if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.LegalHold != nil {
+ req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*appendBlobClientCreateOptions.LegalHold))
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// createHandleResponse handles the Create response.
-func (client *appendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobCreateResponse, error) {
- result := AppendBlobCreateResponse{RawResponse: resp}
+func (client *appendBlobClient) createHandleResponse(resp *http.Response) (appendBlobClientCreateResponse, error) {
+ result := appendBlobClientCreateResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return AppendBlobCreateResponse{}, err
+ return appendBlobClientCreateResponse{}, err
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return AppendBlobCreateResponse{}, err
+ return appendBlobClientCreateResponse{}, err
}
result.ContentMD5 = contentMD5
}
@@ -473,14 +526,14 @@ func (client *appendBlobClient) createHandleResponse(resp *http.Response) (Appen
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return AppendBlobCreateResponse{}, err
+ return appendBlobClientCreateResponse{}, err
}
result.Date = &date
}
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return AppendBlobCreateResponse{}, err
+ return appendBlobClientCreateResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
@@ -493,38 +546,45 @@ func (client *appendBlobClient) createHandleResponse(resp *http.Response) (Appen
return result, nil
}
-// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version or later.
-// If the operation fails it returns the *StorageError error type.
-func (client *appendBlobClient) Seal(ctx context.Context, appendBlobSealOptions *AppendBlobSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobSealResponse, error) {
- req, err := client.sealCreateRequest(ctx, appendBlobSealOptions, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions)
+// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version
+// or later.
+// If the operation fails it returns an *azcore.ResponseError type.
+// appendBlobClientSealOptions - appendBlobClientSealOptions contains the optional parameters for the appendBlobClient.Seal
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock
+// method.
+func (client *appendBlobClient) Seal(ctx context.Context, appendBlobClientSealOptions *appendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (appendBlobClientSealResponse, error) {
+ req, err := client.sealCreateRequest(ctx, appendBlobClientSealOptions, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions)
if err != nil {
- return AppendBlobSealResponse{}, err
+ return appendBlobClientSealResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return AppendBlobSealResponse{}, err
+ return appendBlobClientSealResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return AppendBlobSealResponse{}, runtime.NewResponseError(resp)
+ return appendBlobClientSealResponse{}, runtime.NewResponseError(resp)
}
return client.sealHandleResponse(resp)
}
// sealCreateRequest creates the Seal request.
-func (client *appendBlobClient) sealCreateRequest(ctx context.Context, appendBlobSealOptions *AppendBlobSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *appendBlobClient) sealCreateRequest(ctx context.Context, appendBlobClientSealOptions *appendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "seal")
- if appendBlobSealOptions != nil && appendBlobSealOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobSealOptions.Timeout), 10))
+ if appendBlobClientSealOptions != nil && appendBlobClientSealOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientSealOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if appendBlobSealOptions != nil && appendBlobSealOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *appendBlobSealOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if appendBlobClientSealOptions != nil && appendBlobClientSealOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientSealOptions.RequestID)
}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
@@ -549,15 +609,15 @@ func (client *appendBlobClient) sealCreateRequest(ctx context.Context, appendBlo
}
// sealHandleResponse handles the Seal response.
-func (client *appendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobSealResponse, error) {
- result := AppendBlobSealResponse{RawResponse: resp}
+func (client *appendBlobClient) sealHandleResponse(resp *http.Response) (appendBlobClientSealResponse, error) {
+ result := appendBlobClientSealResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return AppendBlobSealResponse{}, err
+ return appendBlobClientSealResponse{}, err
}
result.LastModified = &lastModified
}
@@ -573,14 +633,14 @@ func (client *appendBlobClient) sealHandleResponse(resp *http.Response) (AppendB
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return AppendBlobSealResponse{}, err
+ return appendBlobClientSealResponse{}, err
}
result.Date = &date
}
if val := resp.Header.Get("x-ms-blob-sealed"); val != "" {
isSealed, err := strconv.ParseBool(val)
if err != nil {
- return AppendBlobSealResponse{}, err
+ return appendBlobClientSealResponse{}, err
}
result.IsSealed = &isSealed
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go
index 05a273cb25a..607c6a714dc 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
@@ -11,66 +11,80 @@ package azblob
import (
"context"
"encoding/base64"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"net/http"
"strconv"
"strings"
"time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
type blobClient struct {
- con *connection
- pathRenameMode *PathRenameMode
+ endpoint string
+ pl runtime.Pipeline
+}
+
+// newBlobClient creates a new instance of blobClient with the specified values.
+// endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
+// pl - the pipeline used for sending requests and handling responses.
+func newBlobClient(endpoint string, pl runtime.Pipeline) *blobClient {
+ client := &blobClient{
+ endpoint: endpoint,
+ pl: pl,
+ }
+ return client
}
-// AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination blob with zero length and full
-// metadata.
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) AbortCopyFromURL(ctx context.Context, copyID string, blobAbortCopyFromURLOptions *BlobAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobAbortCopyFromURLResponse, error) {
- req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, blobAbortCopyFromURLOptions, leaseAccessConditions)
+// AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination
+// blob with zero length and full metadata.
+// If the operation fails it returns an *azcore.ResponseError type.
+// copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation.
+// blobClientAbortCopyFromURLOptions - blobClientAbortCopyFromURLOptions contains the optional parameters for the blobClient.AbortCopyFromURL
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+func (client *blobClient) AbortCopyFromURL(ctx context.Context, copyID string, blobClientAbortCopyFromURLOptions *blobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (blobClientAbortCopyFromURLResponse, error) {
+ req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, blobClientAbortCopyFromURLOptions, leaseAccessConditions)
if err != nil {
- return BlobAbortCopyFromURLResponse{}, err
+ return blobClientAbortCopyFromURLResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobAbortCopyFromURLResponse{}, err
+ return blobClientAbortCopyFromURLResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusNoContent) {
- return BlobAbortCopyFromURLResponse{}, runtime.NewResponseError(resp)
+ return blobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp)
}
return client.abortCopyFromURLHandleResponse(resp)
}
// abortCopyFromURLCreateRequest creates the AbortCopyFromURL request.
-func (client *blobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, blobAbortCopyFromURLOptions *BlobAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, blobClientAbortCopyFromURLOptions *blobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "copy")
reqQP.Set("copyid", copyID)
- if blobAbortCopyFromURLOptions != nil && blobAbortCopyFromURLOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobAbortCopyFromURLOptions.Timeout), 10))
+ if blobClientAbortCopyFromURLOptions != nil && blobClientAbortCopyFromURLOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientAbortCopyFromURLOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-copy-action", "abort")
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobAbortCopyFromURLOptions != nil && blobAbortCopyFromURLOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobAbortCopyFromURLOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientAbortCopyFromURLOptions != nil && blobClientAbortCopyFromURLOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientAbortCopyFromURLOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// abortCopyFromURLHandleResponse handles the AbortCopyFromURL response.
-func (client *blobClient) abortCopyFromURLHandleResponse(resp *http.Response) (BlobAbortCopyFromURLResponse, error) {
- result := BlobAbortCopyFromURLResponse{RawResponse: resp}
+func (client *blobClient) abortCopyFromURLHandleResponse(resp *http.Response) (blobClientAbortCopyFromURLResponse, error) {
+ result := blobClientAbortCopyFromURLResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -83,7 +97,7 @@ func (client *blobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobAbortCopyFromURLResponse{}, err
+ return blobClientAbortCopyFromURLResponse{}, err
}
result.Date = &date
}
@@ -91,40 +105,43 @@ func (client *blobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B
}
// AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) AcquireLease(ctx context.Context, blobAcquireLeaseOptions *BlobAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobAcquireLeaseResponse, error) {
- req, err := client.acquireLeaseCreateRequest(ctx, blobAcquireLeaseOptions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobClientAcquireLeaseOptions - blobClientAcquireLeaseOptions contains the optional parameters for the blobClient.AcquireLease
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) AcquireLease(ctx context.Context, blobClientAcquireLeaseOptions *blobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientAcquireLeaseResponse, error) {
+ req, err := client.acquireLeaseCreateRequest(ctx, blobClientAcquireLeaseOptions, modifiedAccessConditions)
if err != nil {
- return BlobAcquireLeaseResponse{}, err
+ return blobClientAcquireLeaseResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobAcquireLeaseResponse{}, err
+ return blobClientAcquireLeaseResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return BlobAcquireLeaseResponse{}, runtime.NewResponseError(resp)
+ return blobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp)
}
return client.acquireLeaseHandleResponse(resp)
}
// acquireLeaseCreateRequest creates the AcquireLease request.
-func (client *blobClient) acquireLeaseCreateRequest(ctx context.Context, blobAcquireLeaseOptions *BlobAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blobClient) acquireLeaseCreateRequest(ctx context.Context, blobClientAcquireLeaseOptions *blobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "lease")
- if blobAcquireLeaseOptions != nil && blobAcquireLeaseOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobAcquireLeaseOptions.Timeout), 10))
+ if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientAcquireLeaseOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-lease-action", "acquire")
- if blobAcquireLeaseOptions != nil && blobAcquireLeaseOptions.Duration != nil {
- req.Raw().Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*blobAcquireLeaseOptions.Duration), 10))
+ if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.Duration != nil {
+ req.Raw().Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*blobClientAcquireLeaseOptions.Duration), 10))
}
- if blobAcquireLeaseOptions != nil && blobAcquireLeaseOptions.ProposedLeaseID != nil {
- req.Raw().Header.Set("x-ms-proposed-lease-id", *blobAcquireLeaseOptions.ProposedLeaseID)
+ if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.ProposedLeaseID != nil {
+ req.Raw().Header.Set("x-ms-proposed-lease-id", *blobClientAcquireLeaseOptions.ProposedLeaseID)
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
@@ -141,24 +158,24 @@ func (client *blobClient) acquireLeaseCreateRequest(ctx context.Context, blobAcq
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobAcquireLeaseOptions != nil && blobAcquireLeaseOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobAcquireLeaseOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientAcquireLeaseOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// acquireLeaseHandleResponse handles the AcquireLease response.
-func (client *blobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobAcquireLeaseResponse, error) {
- result := BlobAcquireLeaseResponse{RawResponse: resp}
+func (client *blobClient) acquireLeaseHandleResponse(resp *http.Response) (blobClientAcquireLeaseResponse, error) {
+ result := blobClientAcquireLeaseResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobAcquireLeaseResponse{}, err
+ return blobClientAcquireLeaseResponse{}, err
}
result.LastModified = &lastModified
}
@@ -177,7 +194,7 @@ func (client *blobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobA
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobAcquireLeaseResponse{}, err
+ return blobClientAcquireLeaseResponse{}, err
}
result.Date = &date
}
@@ -185,37 +202,40 @@ func (client *blobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobA
}
// BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) BreakLease(ctx context.Context, blobBreakLeaseOptions *BlobBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobBreakLeaseResponse, error) {
- req, err := client.breakLeaseCreateRequest(ctx, blobBreakLeaseOptions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobClientBreakLeaseOptions - blobClientBreakLeaseOptions contains the optional parameters for the blobClient.BreakLease
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) BreakLease(ctx context.Context, blobClientBreakLeaseOptions *blobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientBreakLeaseResponse, error) {
+ req, err := client.breakLeaseCreateRequest(ctx, blobClientBreakLeaseOptions, modifiedAccessConditions)
if err != nil {
- return BlobBreakLeaseResponse{}, err
+ return blobClientBreakLeaseResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobBreakLeaseResponse{}, err
+ return blobClientBreakLeaseResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusAccepted) {
- return BlobBreakLeaseResponse{}, runtime.NewResponseError(resp)
+ return blobClientBreakLeaseResponse{}, runtime.NewResponseError(resp)
}
return client.breakLeaseHandleResponse(resp)
}
// breakLeaseCreateRequest creates the BreakLease request.
-func (client *blobClient) breakLeaseCreateRequest(ctx context.Context, blobBreakLeaseOptions *BlobBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blobClient) breakLeaseCreateRequest(ctx context.Context, blobClientBreakLeaseOptions *blobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "lease")
- if blobBreakLeaseOptions != nil && blobBreakLeaseOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobBreakLeaseOptions.Timeout), 10))
+ if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientBreakLeaseOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-lease-action", "break")
- if blobBreakLeaseOptions != nil && blobBreakLeaseOptions.BreakPeriod != nil {
- req.Raw().Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*blobBreakLeaseOptions.BreakPeriod), 10))
+ if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.BreakPeriod != nil {
+ req.Raw().Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*blobClientBreakLeaseOptions.BreakPeriod), 10))
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
@@ -232,24 +252,24 @@ func (client *blobClient) breakLeaseCreateRequest(ctx context.Context, blobBreak
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobBreakLeaseOptions != nil && blobBreakLeaseOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobBreakLeaseOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientBreakLeaseOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// breakLeaseHandleResponse handles the BreakLease response.
-func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (BlobBreakLeaseResponse, error) {
- result := BlobBreakLeaseResponse{RawResponse: resp}
+func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (blobClientBreakLeaseResponse, error) {
+ result := blobClientBreakLeaseResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobBreakLeaseResponse{}, err
+ return blobClientBreakLeaseResponse{}, err
}
result.LastModified = &lastModified
}
@@ -257,7 +277,7 @@ func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (BlobBre
leaseTime32, err := strconv.ParseInt(val, 10, 32)
leaseTime := int32(leaseTime32)
if err != nil {
- return BlobBreakLeaseResponse{}, err
+ return blobClientBreakLeaseResponse{}, err
}
result.LeaseTime = &leaseTime
}
@@ -273,7 +293,7 @@ func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (BlobBre
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobBreakLeaseResponse{}, err
+ return blobClientBreakLeaseResponse{}, err
}
result.Date = &date
}
@@ -281,32 +301,39 @@ func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (BlobBre
}
// ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, blobChangeLeaseOptions *BlobChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobChangeLeaseResponse, error) {
- req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, blobChangeLeaseOptions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// leaseID - Specifies the current lease ID on the resource.
+// proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed
+// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID
+// string formats.
+// blobClientChangeLeaseOptions - blobClientChangeLeaseOptions contains the optional parameters for the blobClient.ChangeLease
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, blobClientChangeLeaseOptions *blobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientChangeLeaseResponse, error) {
+ req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, blobClientChangeLeaseOptions, modifiedAccessConditions)
if err != nil {
- return BlobChangeLeaseResponse{}, err
+ return blobClientChangeLeaseResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobChangeLeaseResponse{}, err
+ return blobClientChangeLeaseResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlobChangeLeaseResponse{}, runtime.NewResponseError(resp)
+ return blobClientChangeLeaseResponse{}, runtime.NewResponseError(resp)
}
return client.changeLeaseHandleResponse(resp)
}
// changeLeaseCreateRequest creates the ChangeLease request.
-func (client *blobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, blobChangeLeaseOptions *BlobChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, blobClientChangeLeaseOptions *blobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "lease")
- if blobChangeLeaseOptions != nil && blobChangeLeaseOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobChangeLeaseOptions.Timeout), 10))
+ if blobClientChangeLeaseOptions != nil && blobClientChangeLeaseOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientChangeLeaseOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-lease-action", "change")
@@ -327,24 +354,24 @@ func (client *blobClient) changeLeaseCreateRequest(ctx context.Context, leaseID
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobChangeLeaseOptions != nil && blobChangeLeaseOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobChangeLeaseOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientChangeLeaseOptions != nil && blobClientChangeLeaseOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientChangeLeaseOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// changeLeaseHandleResponse handles the ChangeLease response.
-func (client *blobClient) changeLeaseHandleResponse(resp *http.Response) (BlobChangeLeaseResponse, error) {
- result := BlobChangeLeaseResponse{RawResponse: resp}
+func (client *blobClient) changeLeaseHandleResponse(resp *http.Response) (blobClientChangeLeaseResponse, error) {
+ result := blobClientChangeLeaseResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobChangeLeaseResponse{}, err
+ return blobClientChangeLeaseResponse{}, err
}
result.LastModified = &lastModified
}
@@ -363,49 +390,59 @@ func (client *blobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCh
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobChangeLeaseResponse{}, err
+ return blobClientChangeLeaseResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response until the copy is complete.
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) CopyFromURL(ctx context.Context, copySource string, blobCopyFromURLOptions *BlobCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobCopyFromURLResponse, error) {
- req, err := client.copyFromURLCreateRequest(ctx, copySource, blobCopyFromURLOptions, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions)
+// CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response
+// until the copy is complete.
+// If the operation fails it returns an *azcore.ResponseError type.
+// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
+// a page blob snapshot. The value should be URL-encoded as it would appear in a request
+// URI. The source blob must either be public or must be authenticated via a shared access signature.
+// blobClientCopyFromURLOptions - blobClientCopyFromURLOptions contains the optional parameters for the blobClient.CopyFromURL
+// method.
+// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+func (client *blobClient) CopyFromURL(ctx context.Context, copySource string, blobClientCopyFromURLOptions *blobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientCopyFromURLResponse, error) {
+ req, err := client.copyFromURLCreateRequest(ctx, copySource, blobClientCopyFromURLOptions, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions)
if err != nil {
- return BlobCopyFromURLResponse{}, err
+ return blobClientCopyFromURLResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobCopyFromURLResponse{}, err
+ return blobClientCopyFromURLResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusAccepted) {
- return BlobCopyFromURLResponse{}, runtime.NewResponseError(resp)
+ return blobClientCopyFromURLResponse{}, runtime.NewResponseError(resp)
}
return client.copyFromURLHandleResponse(resp)
}
// copyFromURLCreateRequest creates the CopyFromURL request.
-func (client *blobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, blobCopyFromURLOptions *BlobCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, blobClientCopyFromURLOptions *blobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- if blobCopyFromURLOptions != nil && blobCopyFromURLOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobCopyFromURLOptions.Timeout), 10))
+ if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientCopyFromURLOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-requires-sync", "true")
- if blobCopyFromURLOptions != nil && blobCopyFromURLOptions.Metadata != nil {
- for k, v := range blobCopyFromURLOptions.Metadata {
+ if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Metadata != nil {
+ for k, v := range blobClientCopyFromURLOptions.Metadata {
req.Raw().Header.Set("x-ms-meta-"+k, v)
}
}
- if blobCopyFromURLOptions != nil && blobCopyFromURLOptions.Tier != nil {
- req.Raw().Header.Set("x-ms-access-tier", string(*blobCopyFromURLOptions.Tier))
+ if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Tier != nil {
+ req.Raw().Header.Set("x-ms-access-tier", string(*blobClientCopyFromURLOptions.Tier))
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123))
@@ -438,30 +475,42 @@ func (client *blobClient) copyFromURLCreateRequest(ctx context.Context, copySour
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobCopyFromURLOptions != nil && blobCopyFromURLOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobCopyFromURLOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientCopyFromURLOptions.RequestID)
+ }
+ if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.SourceContentMD5 != nil {
+ req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(blobClientCopyFromURLOptions.SourceContentMD5))
+ }
+ if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.BlobTagsString != nil {
+ req.Raw().Header.Set("x-ms-tags", *blobClientCopyFromURLOptions.BlobTagsString)
+ }
+ if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.ImmutabilityPolicyExpiry != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientCopyFromURLOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123))
+ }
+ if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.ImmutabilityPolicyMode != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientCopyFromURLOptions.ImmutabilityPolicyMode))
}
- if blobCopyFromURLOptions != nil && blobCopyFromURLOptions.SourceContentMD5 != nil {
- req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(blobCopyFromURLOptions.SourceContentMD5))
+ if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.LegalHold != nil {
+ req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blobClientCopyFromURLOptions.LegalHold))
}
- if blobCopyFromURLOptions != nil && blobCopyFromURLOptions.BlobTagsString != nil {
- req.Raw().Header.Set("x-ms-tags", *blobCopyFromURLOptions.BlobTagsString)
+ if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.CopySourceAuthorization != nil {
+ req.Raw().Header.Set("x-ms-copy-source-authorization", *blobClientCopyFromURLOptions.CopySourceAuthorization)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// copyFromURLHandleResponse handles the CopyFromURL response.
-func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCopyFromURLResponse, error) {
- result := BlobCopyFromURLResponse{RawResponse: resp}
+func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (blobClientCopyFromURLResponse, error) {
+ result := blobClientCopyFromURLResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobCopyFromURLResponse{}, err
+ return blobClientCopyFromURLResponse{}, err
}
result.LastModified = &lastModified
}
@@ -480,7 +529,7 @@ func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCo
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobCopyFromURLResponse{}, err
+ return blobClientCopyFromURLResponse{}, err
}
result.Date = &date
}
@@ -493,14 +542,14 @@ func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCo
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlobCopyFromURLResponse{}, err
+ return blobClientCopyFromURLResponse{}, err
}
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlobCopyFromURLResponse{}, err
+ return blobClientCopyFromURLResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
}
@@ -508,36 +557,42 @@ func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCo
}
// CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) CreateSnapshot(ctx context.Context, blobCreateSnapshotOptions *BlobCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobCreateSnapshotResponse, error) {
- req, err := client.createSnapshotCreateRequest(ctx, blobCreateSnapshotOptions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobClientCreateSnapshotOptions - blobClientCreateSnapshotOptions contains the optional parameters for the blobClient.CreateSnapshot
+// method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+func (client *blobClient) CreateSnapshot(ctx context.Context, blobClientCreateSnapshotOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientCreateSnapshotResponse, error) {
+ req, err := client.createSnapshotCreateRequest(ctx, blobClientCreateSnapshotOptions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions)
if err != nil {
- return BlobCreateSnapshotResponse{}, err
+ return blobClientCreateSnapshotResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobCreateSnapshotResponse{}, err
+ return blobClientCreateSnapshotResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return BlobCreateSnapshotResponse{}, runtime.NewResponseError(resp)
+ return blobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp)
}
return client.createSnapshotHandleResponse(resp)
}
// createSnapshotCreateRequest creates the CreateSnapshot request.
-func (client *blobClient) createSnapshotCreateRequest(ctx context.Context, blobCreateSnapshotOptions *BlobCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blobClient) createSnapshotCreateRequest(ctx context.Context, blobClientCreateSnapshotOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "snapshot")
- if blobCreateSnapshotOptions != nil && blobCreateSnapshotOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobCreateSnapshotOptions.Timeout), 10))
+ if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientCreateSnapshotOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if blobCreateSnapshotOptions != nil && blobCreateSnapshotOptions.Metadata != nil {
- for k, v := range blobCreateSnapshotOptions.Metadata {
+ if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.Metadata != nil {
+ for k, v := range blobClientCreateSnapshotOptions.Metadata {
req.Raw().Header.Set("x-ms-meta-"+k, v)
}
}
@@ -548,7 +603,7 @@ func (client *blobClient) createSnapshotCreateRequest(ctx context.Context, blobC
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
@@ -571,17 +626,17 @@ func (client *blobClient) createSnapshotCreateRequest(ctx context.Context, blobC
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobCreateSnapshotOptions != nil && blobCreateSnapshotOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobCreateSnapshotOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientCreateSnapshotOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// createSnapshotHandleResponse handles the CreateSnapshot response.
-func (client *blobClient) createSnapshotHandleResponse(resp *http.Response) (BlobCreateSnapshotResponse, error) {
- result := BlobCreateSnapshotResponse{RawResponse: resp}
+func (client *blobClient) createSnapshotHandleResponse(resp *http.Response) (blobClientCreateSnapshotResponse, error) {
+ result := blobClientCreateSnapshotResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-snapshot"); val != "" {
result.Snapshot = &val
}
@@ -591,7 +646,7 @@ func (client *blobClient) createSnapshotHandleResponse(resp *http.Response) (Blo
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobCreateSnapshotResponse{}, err
+ return blobClientCreateSnapshotResponse{}, err
}
result.LastModified = &lastModified
}
@@ -610,69 +665,75 @@ func (client *blobClient) createSnapshotHandleResponse(resp *http.Response) (Blo
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobCreateSnapshotResponse{}, err
+ return blobClientCreateSnapshotResponse{}, err
}
result.Date = &date
}
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return BlobCreateSnapshotResponse{}, err
+ return blobClientCreateSnapshotResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
return result, nil
}
-// Delete - If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. If
-// the storage account's soft delete feature is enabled,
-// then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob or snapshot
-// for the number of days specified by the
-// DeleteRetentionPolicy section of Storage service properties [Set-Blob-Service-Properties.md]. After the specified number of days has passed, the blob's
-// data is permanently removed from the storage
-// account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use the List Blobs API and specify
-// the "include=deleted" query parameter to discover
-// which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. All other operations on a soft-deleted
-// blob or snapshot causes the service to
+// Delete - If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed
+// from the storage account. If the storage account's soft delete feature is enabled,
+// then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service
+// retains the blob or snapshot for the number of days specified by the
+// DeleteRetentionPolicy section of Storage service properties [Set-Blob-Service-Properties.md]. After the specified number
+// of days has passed, the blob's data is permanently removed from the storage
+// account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use
+// the List Blobs API and specify the "include=deleted" query parameter to discover
+// which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob.
+// All other operations on a soft-deleted blob or snapshot causes the service to
// return an HTTP status code of 404 (ResourceNotFound).
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) Delete(ctx context.Context, blobDeleteOptions *BlobDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobDeleteResponse, error) {
- req, err := client.deleteCreateRequest(ctx, blobDeleteOptions, leaseAccessConditions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobClientDeleteOptions - blobClientDeleteOptions contains the optional parameters for the blobClient.Delete method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) Delete(ctx context.Context, blobClientDeleteOptions *blobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientDeleteResponse, error) {
+ req, err := client.deleteCreateRequest(ctx, blobClientDeleteOptions, leaseAccessConditions, modifiedAccessConditions)
if err != nil {
- return BlobDeleteResponse{}, err
+ return blobClientDeleteResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobDeleteResponse{}, err
+ return blobClientDeleteResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusAccepted) {
- return BlobDeleteResponse{}, runtime.NewResponseError(resp)
+ return blobClientDeleteResponse{}, runtime.NewResponseError(resp)
}
return client.deleteHandleResponse(resp)
}
// deleteCreateRequest creates the Delete request.
-func (client *blobClient) deleteCreateRequest(ctx context.Context, blobDeleteOptions *BlobDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodDelete, client.con.Endpoint())
+func (client *blobClient) deleteCreateRequest(ctx context.Context, blobClientDeleteOptions *blobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- if blobDeleteOptions != nil && blobDeleteOptions.Snapshot != nil {
- reqQP.Set("snapshot", *blobDeleteOptions.Snapshot)
+ if blobClientDeleteOptions != nil && blobClientDeleteOptions.Snapshot != nil {
+ reqQP.Set("snapshot", *blobClientDeleteOptions.Snapshot)
}
- if blobDeleteOptions != nil && blobDeleteOptions.VersionID != nil {
- reqQP.Set("versionid", *blobDeleteOptions.VersionID)
+ if blobClientDeleteOptions != nil && blobClientDeleteOptions.VersionID != nil {
+ reqQP.Set("versionid", *blobClientDeleteOptions.VersionID)
}
- if blobDeleteOptions != nil && blobDeleteOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobDeleteOptions.Timeout), 10))
+ if blobClientDeleteOptions != nil && blobClientDeleteOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientDeleteOptions.Timeout), 10))
+ }
+ if blobClientDeleteOptions != nil && blobClientDeleteOptions.BlobDeleteType != nil {
+ reqQP.Set("deletetype", string(*blobClientDeleteOptions.BlobDeleteType))
}
req.Raw().URL.RawQuery = reqQP.Encode()
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
}
- if blobDeleteOptions != nil && blobDeleteOptions.DeleteSnapshots != nil {
- req.Raw().Header.Set("x-ms-delete-snapshots", string(*blobDeleteOptions.DeleteSnapshots))
+ if blobClientDeleteOptions != nil && blobClientDeleteOptions.DeleteSnapshots != nil {
+ req.Raw().Header.Set("x-ms-delete-snapshots", string(*blobClientDeleteOptions.DeleteSnapshots))
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
@@ -689,17 +750,17 @@ func (client *blobClient) deleteCreateRequest(ctx context.Context, blobDeleteOpt
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobDeleteOptions != nil && blobDeleteOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobDeleteOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientDeleteOptions != nil && blobClientDeleteOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientDeleteOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// deleteHandleResponse handles the Delete response.
-func (client *blobClient) deleteHandleResponse(resp *http.Response) (BlobDeleteResponse, error) {
- result := BlobDeleteResponse{RawResponse: resp}
+func (client *blobClient) deleteHandleResponse(resp *http.Response) (blobClientDeleteResponse, error) {
+ result := blobClientDeleteResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -712,60 +773,125 @@ func (client *blobClient) deleteHandleResponse(resp *http.Response) (BlobDeleteR
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobDeleteResponse{}, err
+ return blobClientDeleteResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. You can also call Download to read
-// a snapshot.
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) Download(ctx context.Context, blobDownloadOptions *BlobDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobDownloadResponse, error) {
- req, err := client.downloadCreateRequest(ctx, blobDownloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
+// DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob
+// If the operation fails it returns an *azcore.ResponseError type.
+// options - blobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the blobClient.DeleteImmutabilityPolicy
+// method.
+func (client *blobClient) DeleteImmutabilityPolicy(ctx context.Context, options *blobClientDeleteImmutabilityPolicyOptions) (blobClientDeleteImmutabilityPolicyResponse, error) {
+ req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDeleteImmutabilityPolicyResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDeleteImmutabilityPolicyResponse{}, err
}
- if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) {
- return BlobDownloadResponse{}, runtime.NewResponseError(resp)
+ if !runtime.HasStatusCode(resp, http.StatusOK) {
+ return blobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp)
+ }
+ return client.deleteImmutabilityPolicyHandleResponse(resp)
+}
+
+// deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request.
+func (client *blobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Context, options *blobClientDeleteImmutabilityPolicyOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("comp", "immutabilityPolicies")
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
+ }
+ req.Raw().Header.Set("Accept", "application/xml")
+ return req, nil
+}
+
+// deleteImmutabilityPolicyHandleResponse handles the DeleteImmutabilityPolicy response.
+func (client *blobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Response) (blobClientDeleteImmutabilityPolicyResponse, error) {
+ result := blobClientDeleteImmutabilityPolicyResponse{RawResponse: resp}
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-request-id"); val != "" {
+ result.RequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-version"); val != "" {
+ result.Version = &val
+ }
+ if val := resp.Header.Get("Date"); val != "" {
+ date, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return blobClientDeleteImmutabilityPolicyResponse{}, err
+ }
+ result.Date = &date
+ }
+ return result, nil
+}
+
+// Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. You
+// can also call Download to read a snapshot.
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobClientDownloadOptions - blobClientDownloadOptions contains the optional parameters for the blobClient.Download method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) Download(ctx context.Context, blobClientDownloadOptions *blobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientDownloadResponse, error) {
+ req, err := client.downloadCreateRequest(ctx, blobClientDownloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
+ if err != nil {
+ return blobClientDownloadResponse{}, err
+ }
+ resp, err := client.pl.Do(req)
+ if err != nil {
+ return blobClientDownloadResponse{}, err
+ }
+ if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) {
+ return blobClientDownloadResponse{}, runtime.NewResponseError(resp)
}
return client.downloadHandleResponse(resp)
}
// downloadCreateRequest creates the Download request.
-func (client *blobClient) downloadCreateRequest(ctx context.Context, blobDownloadOptions *BlobDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *blobClient) downloadCreateRequest(ctx context.Context, blobClientDownloadOptions *blobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- if blobDownloadOptions != nil && blobDownloadOptions.Snapshot != nil {
- reqQP.Set("snapshot", *blobDownloadOptions.Snapshot)
+ if blobClientDownloadOptions != nil && blobClientDownloadOptions.Snapshot != nil {
+ reqQP.Set("snapshot", *blobClientDownloadOptions.Snapshot)
}
- if blobDownloadOptions != nil && blobDownloadOptions.VersionID != nil {
- reqQP.Set("versionid", *blobDownloadOptions.VersionID)
+ if blobClientDownloadOptions != nil && blobClientDownloadOptions.VersionID != nil {
+ reqQP.Set("versionid", *blobClientDownloadOptions.VersionID)
}
- if blobDownloadOptions != nil && blobDownloadOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobDownloadOptions.Timeout), 10))
+ if blobClientDownloadOptions != nil && blobClientDownloadOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientDownloadOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
runtime.SkipBodyDownload(req)
- if blobDownloadOptions != nil && blobDownloadOptions.Range != nil {
- req.Raw().Header.Set("x-ms-range", *blobDownloadOptions.Range)
+ if blobClientDownloadOptions != nil && blobClientDownloadOptions.Range != nil {
+ req.Raw().Header.Set("x-ms-range", *blobClientDownloadOptions.Range)
}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
}
- if blobDownloadOptions != nil && blobDownloadOptions.RangeGetContentMD5 != nil {
- req.Raw().Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*blobDownloadOptions.RangeGetContentMD5))
+ if blobClientDownloadOptions != nil && blobClientDownloadOptions.RangeGetContentMD5 != nil {
+ req.Raw().Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*blobClientDownloadOptions.RangeGetContentMD5))
}
- if blobDownloadOptions != nil && blobDownloadOptions.RangeGetContentCRC64 != nil {
- req.Raw().Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*blobDownloadOptions.RangeGetContentCRC64))
+ if blobClientDownloadOptions != nil && blobClientDownloadOptions.RangeGetContentCRC64 != nil {
+ req.Raw().Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*blobClientDownloadOptions.RangeGetContentCRC64))
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
@@ -774,7 +900,7 @@ func (client *blobClient) downloadCreateRequest(ctx context.Context, blobDownloa
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
@@ -791,21 +917,21 @@ func (client *blobClient) downloadCreateRequest(ctx context.Context, blobDownloa
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobDownloadOptions != nil && blobDownloadOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobDownloadOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientDownloadOptions != nil && blobClientDownloadOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientDownloadOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// downloadHandleResponse handles the Download response.
-func (client *blobClient) downloadHandleResponse(resp *http.Response) (BlobDownloadResponse, error) {
- result := BlobDownloadResponse{RawResponse: resp}
+func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClientDownloadResponse, error) {
+ result := blobClientDownloadResponse{RawResponse: resp}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
result.LastModified = &lastModified
}
@@ -831,7 +957,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (BlobDownl
if val := resp.Header.Get("Content-Length"); val != "" {
contentLength, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
result.ContentLength = &contentLength
}
@@ -847,7 +973,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (BlobDownl
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
result.ContentMD5 = contentMD5
}
@@ -866,7 +992,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (BlobDownl
if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
result.BlobSequenceNumber = &blobSequenceNumber
}
@@ -876,7 +1002,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (BlobDownl
if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
copyCompletionTime, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
 		result.CopyCompletionTime = &copyCompletionTime
}
@@ -919,7 +1045,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (BlobDownl
if val := resp.Header.Get("x-ms-is-current-version"); val != "" {
isCurrentVersion, err := strconv.ParseBool(val)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
result.IsCurrentVersion = &isCurrentVersion
}
@@ -929,7 +1055,7 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (BlobDownl
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
result.Date = &date
}
@@ -937,14 +1063,14 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (BlobDownl
blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
blobCommittedBlockCount := int32(blobCommittedBlockCount32)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
result.BlobCommittedBlockCount = &blobCommittedBlockCount
}
if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
@@ -957,150 +1083,82 @@ func (client *blobClient) downloadHandleResponse(resp *http.Response) (BlobDownl
if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" {
blobContentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
result.BlobContentMD5 = blobContentMD5
}
if val := resp.Header.Get("x-ms-tag-count"); val != "" {
tagCount, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
result.TagCount = &tagCount
}
if val := resp.Header.Get("x-ms-blob-sealed"); val != "" {
isSealed, err := strconv.ParseBool(val)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
result.IsSealed = &isSealed
}
- if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
- contentCRC64, err := base64.StdEncoding.DecodeString(val)
+ if val := resp.Header.Get("x-ms-last-access-time"); val != "" {
+ lastAccessed, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobDownloadResponse{}, err
+ return blobClientDownloadResponse{}, err
}
- result.ContentCRC64 = contentCRC64
- }
- return result, nil
-}
-
-// GetAccessControl - Get the owner, group, permissions, or access control list for a blob.
-// If the operation fails it returns the *DataLakeStorageError error type.
-func (client *blobClient) GetAccessControl(ctx context.Context, blobGetAccessControlOptions *BlobGetAccessControlOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobGetAccessControlResponse, error) {
- req, err := client.getAccessControlCreateRequest(ctx, blobGetAccessControlOptions, leaseAccessConditions, modifiedAccessConditions)
- if err != nil {
- return BlobGetAccessControlResponse{}, err
- }
- resp, err := client.con.Pipeline().Do(req)
- if err != nil {
- return BlobGetAccessControlResponse{}, err
- }
- if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlobGetAccessControlResponse{}, runtime.NewResponseError(resp)
- }
- return client.getAccessControlHandleResponse(resp)
-}
-
-// getAccessControlCreateRequest creates the GetAccessControl request.
-func (client *blobClient) getAccessControlCreateRequest(ctx context.Context, blobGetAccessControlOptions *BlobGetAccessControlOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodHead, client.con.Endpoint())
- if err != nil {
- return nil, err
- }
- reqQP := req.Raw().URL.Query()
- reqQP.Set("action", "getAccessControl")
- if blobGetAccessControlOptions != nil && blobGetAccessControlOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobGetAccessControlOptions.Timeout), 10))
- }
- if blobGetAccessControlOptions != nil && blobGetAccessControlOptions.Upn != nil {
- reqQP.Set("upn", strconv.FormatBool(*blobGetAccessControlOptions.Upn))
- }
- req.Raw().URL.RawQuery = reqQP.Encode()
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
- }
- if blobGetAccessControlOptions != nil && blobGetAccessControlOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobGetAccessControlOptions.RequestID)
+ result.LastAccessed = &lastAccessed
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- req.Raw().Header.Set("Accept", "application/xml")
- return req, nil
-}
-
-// getAccessControlHandleResponse handles the GetAccessControl response.
-func (client *blobClient) getAccessControlHandleResponse(resp *http.Response) (BlobGetAccessControlResponse, error) {
- result := BlobGetAccessControlResponse{RawResponse: resp}
- if val := resp.Header.Get("Date"); val != "" {
- date, err := time.Parse(time.RFC1123, val)
+ if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" {
+ immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobGetAccessControlResponse{}, err
+ return blobClientDownloadResponse{}, err
}
- result.Date = &date
+ result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn
}
- if val := resp.Header.Get("ETag"); val != "" {
- result.ETag = &val
+ if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" {
+ result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val)
}
- if val := resp.Header.Get("Last-Modified"); val != "" {
- lastModified, err := time.Parse(time.RFC1123, val)
+ if val := resp.Header.Get("x-ms-legal-hold"); val != "" {
+ legalHold, err := strconv.ParseBool(val)
if err != nil {
- return BlobGetAccessControlResponse{}, err
+ return blobClientDownloadResponse{}, err
}
- result.LastModified = &lastModified
+ result.LegalHold = &legalHold
}
- if val := resp.Header.Get("x-ms-owner"); val != "" {
- result.XMSOwner = &val
- }
- if val := resp.Header.Get("x-ms-group"); val != "" {
- result.XMSGroup = &val
- }
- if val := resp.Header.Get("x-ms-permissions"); val != "" {
- result.XMSPermissions = &val
- }
- if val := resp.Header.Get("x-ms-acl"); val != "" {
- result.XMSACL = &val
- }
- if val := resp.Header.Get("x-ms-request-id"); val != "" {
- result.RequestID = &val
+ if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+ contentCRC64, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ return blobClientDownloadResponse{}, err
+ }
+ result.ContentCRC64 = contentCRC64
}
- if val := resp.Header.Get("x-ms-version"); val != "" {
- result.Version = &val
+ if val := resp.Header.Get("x-ms-error-code"); val != "" {
+ result.ErrorCode = &val
}
return result, nil
}
// GetAccountInfo - Returns the sku name and account kind
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) GetAccountInfo(ctx context.Context, options *BlobGetAccountInfoOptions) (BlobGetAccountInfoResponse, error) {
+// If the operation fails it returns an *azcore.ResponseError type.
+// options - blobClientGetAccountInfoOptions contains the optional parameters for the blobClient.GetAccountInfo method.
+func (client *blobClient) GetAccountInfo(ctx context.Context, options *blobClientGetAccountInfoOptions) (blobClientGetAccountInfoResponse, error) {
req, err := client.getAccountInfoCreateRequest(ctx, options)
if err != nil {
- return BlobGetAccountInfoResponse{}, err
+ return blobClientGetAccountInfoResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobGetAccountInfoResponse{}, err
+ return blobClientGetAccountInfoResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlobGetAccountInfoResponse{}, runtime.NewResponseError(resp)
+ return blobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp)
}
return client.getAccountInfoHandleResponse(resp)
}
// getAccountInfoCreateRequest creates the GetAccountInfo request.
-func (client *blobClient) getAccountInfoCreateRequest(ctx context.Context, options *BlobGetAccountInfoOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *blobClient) getAccountInfoCreateRequest(ctx context.Context, options *blobClientGetAccountInfoOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
@@ -1108,14 +1166,14 @@ func (client *blobClient) getAccountInfoCreateRequest(ctx context.Context, optio
reqQP.Set("restype", "account")
reqQP.Set("comp", "properties")
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// getAccountInfoHandleResponse handles the GetAccountInfo response.
-func (client *blobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobGetAccountInfoResponse, error) {
- result := BlobGetAccountInfoResponse{RawResponse: resp}
+func (client *blobClient) getAccountInfoHandleResponse(resp *http.Response) (blobClientGetAccountInfoResponse, error) {
+ result := blobClientGetAccountInfoResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -1128,7 +1186,7 @@ func (client *blobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobGetAccountInfoResponse{}, err
+ return blobClientGetAccountInfoResponse{}, err
}
result.Date = &date
}
@@ -1141,39 +1199,44 @@ func (client *blobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo
return result, nil
}
-// GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties for the blob. It does
-// not return the content of the blob.
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) GetProperties(ctx context.Context, blobGetPropertiesOptions *BlobGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobGetPropertiesResponse, error) {
- req, err := client.getPropertiesCreateRequest(ctx, blobGetPropertiesOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
+// GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties
+// for the blob. It does not return the content of the blob.
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobClientGetPropertiesOptions - blobClientGetPropertiesOptions contains the optional parameters for the blobClient.GetProperties
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) GetProperties(ctx context.Context, blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientGetPropertiesResponse, error) {
+ req, err := client.getPropertiesCreateRequest(ctx, blobClientGetPropertiesOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlobGetPropertiesResponse{}, runtime.NewResponseError(resp)
+ return blobClientGetPropertiesResponse{}, runtime.NewResponseError(resp)
}
return client.getPropertiesHandleResponse(resp)
}
// getPropertiesCreateRequest creates the GetProperties request.
-func (client *blobClient) getPropertiesCreateRequest(ctx context.Context, blobGetPropertiesOptions *BlobGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodHead, client.con.Endpoint())
+func (client *blobClient) getPropertiesCreateRequest(ctx context.Context, blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- if blobGetPropertiesOptions != nil && blobGetPropertiesOptions.Snapshot != nil {
- reqQP.Set("snapshot", *blobGetPropertiesOptions.Snapshot)
+ if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.Snapshot != nil {
+ reqQP.Set("snapshot", *blobClientGetPropertiesOptions.Snapshot)
}
- if blobGetPropertiesOptions != nil && blobGetPropertiesOptions.VersionID != nil {
- reqQP.Set("versionid", *blobGetPropertiesOptions.VersionID)
+ if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.VersionID != nil {
+ reqQP.Set("versionid", *blobClientGetPropertiesOptions.VersionID)
}
- if blobGetPropertiesOptions != nil && blobGetPropertiesOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobGetPropertiesOptions.Timeout), 10))
+ if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientGetPropertiesOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
@@ -1186,7 +1249,7 @@ func (client *blobClient) getPropertiesCreateRequest(ctx context.Context, blobGe
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
@@ -1203,28 +1266,28 @@ func (client *blobClient) getPropertiesCreateRequest(ctx context.Context, blobGe
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobGetPropertiesOptions != nil && blobGetPropertiesOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobGetPropertiesOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientGetPropertiesOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// getPropertiesHandleResponse handles the GetProperties response.
-func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (BlobGetPropertiesResponse, error) {
- result := BlobGetPropertiesResponse{RawResponse: resp}
+func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blobClientGetPropertiesResponse, error) {
+ result := blobClientGetPropertiesResponse{RawResponse: resp}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("x-ms-creation-time"); val != "" {
creationTime, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.CreationTime = &creationTime
}
@@ -1253,7 +1316,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
copyCompletionTime, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.CopyCompletionTime = ©CompletionTime
}
@@ -1275,7 +1338,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
if val := resp.Header.Get("x-ms-incremental-copy"); val != "" {
isIncrementalCopy, err := strconv.ParseBool(val)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.IsIncrementalCopy = &isIncrementalCopy
}
@@ -1294,7 +1357,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
if val := resp.Header.Get("Content-Length"); val != "" {
contentLength, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.ContentLength = &contentLength
}
@@ -1307,7 +1370,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.ContentMD5 = contentMD5
}
@@ -1326,7 +1389,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.BlobSequenceNumber = &blobSequenceNumber
}
@@ -1342,7 +1405,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.Date = &date
}
@@ -1353,14 +1416,14 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
blobCommittedBlockCount := int32(blobCommittedBlockCount32)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.BlobCommittedBlockCount = &blobCommittedBlockCount
}
if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
@@ -1376,7 +1439,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" {
accessTierInferred, err := strconv.ParseBool(val)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.AccessTierInferred = &accessTierInferred
}
@@ -1386,7 +1449,7 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" {
accessTierChangeTime, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.AccessTierChangeTime = &accessTierChangeTime
}
@@ -1396,86 +1459,116 @@ func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
if val := resp.Header.Get("x-ms-is-current-version"); val != "" {
isCurrentVersion, err := strconv.ParseBool(val)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.IsCurrentVersion = &isCurrentVersion
}
if val := resp.Header.Get("x-ms-tag-count"); val != "" {
tagCount, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.TagCount = &tagCount
}
if val := resp.Header.Get("x-ms-expiry-time"); val != "" {
expiresOn, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.ExpiresOn = &expiresOn
}
if val := resp.Header.Get("x-ms-blob-sealed"); val != "" {
isSealed, err := strconv.ParseBool(val)
if err != nil {
- return BlobGetPropertiesResponse{}, err
+ return blobClientGetPropertiesResponse{}, err
}
result.IsSealed = &isSealed
}
if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" {
result.RehydratePriority = &val
}
+ if val := resp.Header.Get("x-ms-last-access-time"); val != "" {
+ lastAccessed, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return blobClientGetPropertiesResponse{}, err
+ }
+ result.LastAccessed = &lastAccessed
+ }
+ if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" {
+ immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return blobClientGetPropertiesResponse{}, err
+ }
+ result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn
+ }
+ if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" {
+ result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val)
+ }
+ if val := resp.Header.Get("x-ms-legal-hold"); val != "" {
+ legalHold, err := strconv.ParseBool(val)
+ if err != nil {
+ return blobClientGetPropertiesResponse{}, err
+ }
+ result.LegalHold = &legalHold
+ }
return result, nil
}
// GetTags - The Get Tags operation enables users to get the tags associated with a blob.
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) GetTags(ctx context.Context, blobGetTagsOptions *BlobGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobGetTagsResponse, error) {
- req, err := client.getTagsCreateRequest(ctx, blobGetTagsOptions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobClientGetTagsOptions - blobClientGetTagsOptions contains the optional parameters for the blobClient.GetTags method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+func (client *blobClient) GetTags(ctx context.Context, blobClientGetTagsOptions *blobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientGetTagsResponse, error) {
+ req, err := client.getTagsCreateRequest(ctx, blobClientGetTagsOptions, modifiedAccessConditions, leaseAccessConditions)
if err != nil {
- return BlobGetTagsResponse{}, err
+ return blobClientGetTagsResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobGetTagsResponse{}, err
+ return blobClientGetTagsResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlobGetTagsResponse{}, runtime.NewResponseError(resp)
+ return blobClientGetTagsResponse{}, runtime.NewResponseError(resp)
}
return client.getTagsHandleResponse(resp)
}
// getTagsCreateRequest creates the GetTags request.
-func (client *blobClient) getTagsCreateRequest(ctx context.Context, blobGetTagsOptions *BlobGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *blobClient) getTagsCreateRequest(ctx context.Context, blobClientGetTagsOptions *blobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "tags")
- if blobGetTagsOptions != nil && blobGetTagsOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobGetTagsOptions.Timeout), 10))
+ if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientGetTagsOptions.Timeout), 10))
}
- if blobGetTagsOptions != nil && blobGetTagsOptions.Snapshot != nil {
- reqQP.Set("snapshot", *blobGetTagsOptions.Snapshot)
+ if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.Snapshot != nil {
+ reqQP.Set("snapshot", *blobClientGetTagsOptions.Snapshot)
}
- if blobGetTagsOptions != nil && blobGetTagsOptions.VersionID != nil {
- reqQP.Set("versionid", *blobGetTagsOptions.VersionID)
+ if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.VersionID != nil {
+ reqQP.Set("versionid", *blobClientGetTagsOptions.VersionID)
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobGetTagsOptions != nil && blobGetTagsOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobGetTagsOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientGetTagsOptions.RequestID)
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
+ }
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// getTagsHandleResponse handles the GetTags response.
-func (client *blobClient) getTagsHandleResponse(resp *http.Response) (BlobGetTagsResponse, error) {
- result := BlobGetTagsResponse{RawResponse: resp}
+func (client *blobClient) getTagsHandleResponse(resp *http.Response) (blobClientGetTagsResponse, error) {
+ result := blobClientGetTagsResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -1488,46 +1581,50 @@ func (client *blobClient) getTagsHandleResponse(resp *http.Response) (BlobGetTag
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobGetTagsResponse{}, err
+ return blobClientGetTagsResponse{}, err
}
result.Date = &date
}
if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil {
- return BlobGetTagsResponse{}, err
+ return blobClientGetTagsResponse{}, err
}
return result, nil
}
// Query - The Query operation enables users to select/project on blob data by providing simple query expressions.
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) Query(ctx context.Context, blobQueryOptions *BlobQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobQueryResponse, error) {
- req, err := client.queryCreateRequest(ctx, blobQueryOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobClientQueryOptions - blobClientQueryOptions contains the optional parameters for the blobClient.Query method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) Query(ctx context.Context, blobClientQueryOptions *blobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientQueryResponse, error) {
+ req, err := client.queryCreateRequest(ctx, blobClientQueryOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
if err != nil {
- return BlobQueryResponse{}, err
+ return blobClientQueryResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobQueryResponse{}, err
+ return blobClientQueryResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) {
- return BlobQueryResponse{}, runtime.NewResponseError(resp)
+ return blobClientQueryResponse{}, runtime.NewResponseError(resp)
}
return client.queryHandleResponse(resp)
}
// queryCreateRequest creates the Query request.
-func (client *blobClient) queryCreateRequest(ctx context.Context, blobQueryOptions *BlobQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPost, client.con.Endpoint())
+func (client *blobClient) queryCreateRequest(ctx context.Context, blobClientQueryOptions *blobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "query")
- if blobQueryOptions != nil && blobQueryOptions.Snapshot != nil {
- reqQP.Set("snapshot", *blobQueryOptions.Snapshot)
+ if blobClientQueryOptions != nil && blobClientQueryOptions.Snapshot != nil {
+ reqQP.Set("snapshot", *blobClientQueryOptions.Snapshot)
}
- if blobQueryOptions != nil && blobQueryOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobQueryOptions.Timeout), 10))
+ if blobClientQueryOptions != nil && blobClientQueryOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientQueryOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
runtime.SkipBodyDownload(req)
@@ -1541,7 +1638,7 @@ func (client *blobClient) queryCreateRequest(ctx context.Context, blobQueryOptio
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
@@ -1558,24 +1655,24 @@ func (client *blobClient) queryCreateRequest(ctx context.Context, blobQueryOptio
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobQueryOptions != nil && blobQueryOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobQueryOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientQueryOptions != nil && blobClientQueryOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientQueryOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
- if blobQueryOptions != nil && blobQueryOptions.QueryRequest != nil {
- return req, runtime.MarshalAsXML(req, *blobQueryOptions.QueryRequest)
+ if blobClientQueryOptions != nil && blobClientQueryOptions.QueryRequest != nil {
+ return req, runtime.MarshalAsXML(req, *blobClientQueryOptions.QueryRequest)
}
return req, nil
}
// queryHandleResponse handles the Query response.
-func (client *blobClient) queryHandleResponse(resp *http.Response) (BlobQueryResponse, error) {
- result := BlobQueryResponse{RawResponse: resp}
+func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQueryResponse, error) {
+ result := blobClientQueryResponse{RawResponse: resp}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobQueryResponse{}, err
+ return blobClientQueryResponse{}, err
}
result.LastModified = &lastModified
}
@@ -1590,7 +1687,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (BlobQueryRes
if val := resp.Header.Get("Content-Length"); val != "" {
contentLength, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return BlobQueryResponse{}, err
+ return blobClientQueryResponse{}, err
}
result.ContentLength = &contentLength
}
@@ -1606,7 +1703,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (BlobQueryRes
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlobQueryResponse{}, err
+ return blobClientQueryResponse{}, err
}
result.ContentMD5 = contentMD5
}
@@ -1625,7 +1722,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (BlobQueryRes
if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return BlobQueryResponse{}, err
+ return blobClientQueryResponse{}, err
}
result.BlobSequenceNumber = &blobSequenceNumber
}
@@ -1635,7 +1732,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (BlobQueryRes
if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
copyCompletionTime, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobQueryResponse{}, err
+ return blobClientQueryResponse{}, err
}
result.CopyCompletionTime = ©CompletionTime
}
@@ -1678,7 +1775,7 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (BlobQueryRes
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobQueryResponse{}, err
+ return blobClientQueryResponse{}, err
}
result.Date = &date
}
@@ -1686,14 +1783,14 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (BlobQueryRes
blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
blobCommittedBlockCount := int32(blobCommittedBlockCount32)
if err != nil {
- return BlobQueryResponse{}, err
+ return blobClientQueryResponse{}, err
}
result.BlobCommittedBlockCount = &blobCommittedBlockCount
}
if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return BlobQueryResponse{}, err
+ return blobClientQueryResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
@@ -1706,14 +1803,14 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (BlobQueryRes
if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" {
blobContentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlobQueryResponse{}, err
+ return blobClientQueryResponse{}, err
}
result.BlobContentMD5 = blobContentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
contentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlobQueryResponse{}, err
+ return blobClientQueryResponse{}, err
}
result.ContentCRC64 = contentCRC64
}
@@ -1721,32 +1818,36 @@ func (client *blobClient) queryHandleResponse(resp *http.Response) (BlobQueryRes
}
// ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) ReleaseLease(ctx context.Context, leaseID string, blobReleaseLeaseOptions *BlobReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobReleaseLeaseResponse, error) {
- req, err := client.releaseLeaseCreateRequest(ctx, leaseID, blobReleaseLeaseOptions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// leaseID - Specifies the current lease ID on the resource.
+// blobClientReleaseLeaseOptions - blobClientReleaseLeaseOptions contains the optional parameters for the blobClient.ReleaseLease
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) ReleaseLease(ctx context.Context, leaseID string, blobClientReleaseLeaseOptions *blobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientReleaseLeaseResponse, error) {
+ req, err := client.releaseLeaseCreateRequest(ctx, leaseID, blobClientReleaseLeaseOptions, modifiedAccessConditions)
if err != nil {
- return BlobReleaseLeaseResponse{}, err
+ return blobClientReleaseLeaseResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobReleaseLeaseResponse{}, err
+ return blobClientReleaseLeaseResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlobReleaseLeaseResponse{}, runtime.NewResponseError(resp)
+ return blobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp)
}
return client.releaseLeaseHandleResponse(resp)
}
// releaseLeaseCreateRequest creates the ReleaseLease request.
-func (client *blobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, blobReleaseLeaseOptions *BlobReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, blobClientReleaseLeaseOptions *blobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "lease")
- if blobReleaseLeaseOptions != nil && blobReleaseLeaseOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobReleaseLeaseOptions.Timeout), 10))
+ if blobClientReleaseLeaseOptions != nil && blobClientReleaseLeaseOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientReleaseLeaseOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-lease-action", "release")
@@ -1766,24 +1867,24 @@ func (client *blobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobReleaseLeaseOptions != nil && blobReleaseLeaseOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobReleaseLeaseOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientReleaseLeaseOptions != nil && blobClientReleaseLeaseOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientReleaseLeaseOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// releaseLeaseHandleResponse handles the ReleaseLease response.
-func (client *blobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobReleaseLeaseResponse, error) {
- result := BlobReleaseLeaseResponse{RawResponse: resp}
+func (client *blobClient) releaseLeaseHandleResponse(resp *http.Response) (blobClientReleaseLeaseResponse, error) {
+ result := blobClientReleaseLeaseResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobReleaseLeaseResponse{}, err
+ return blobClientReleaseLeaseResponse{}, err
}
result.LastModified = &lastModified
}
@@ -1799,79 +1900,48 @@ func (client *blobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobR
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobReleaseLeaseResponse{}, err
+ return blobClientReleaseLeaseResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// Rename - Rename a blob/file. By default, the destination is overwritten and if the destination already exists and has a lease the lease is broken. This
-// operation supports conditional HTTP requests. For more
-// information, see Specifying Conditional Headers for Blob Service Operations [https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations].
-// To
-// fail if the destination already exists, use a conditional request with If-None-Match: "*".
-// If the operation fails it returns the *DataLakeStorageError error type.
-func (client *blobClient) Rename(ctx context.Context, renameSource string, blobRenameOptions *BlobRenameOptions, directoryHTTPHeaders *DirectoryHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlobRenameResponse, error) {
- req, err := client.renameCreateRequest(ctx, renameSource, blobRenameOptions, directoryHTTPHeaders, leaseAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
+// RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
+// If the operation fails it returns an *azcore.ResponseError type.
+// leaseID - Specifies the current lease ID on the resource.
+// blobClientRenewLeaseOptions - blobClientRenewLeaseOptions contains the optional parameters for the blobClient.RenewLease
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) RenewLease(ctx context.Context, leaseID string, blobClientRenewLeaseOptions *blobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientRenewLeaseResponse, error) {
+ req, err := client.renewLeaseCreateRequest(ctx, leaseID, blobClientRenewLeaseOptions, modifiedAccessConditions)
if err != nil {
- return BlobRenameResponse{}, err
+ return blobClientRenewLeaseResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobRenameResponse{}, err
+ return blobClientRenewLeaseResponse{}, err
}
- if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return BlobRenameResponse{}, runtime.NewResponseError(resp)
+ if !runtime.HasStatusCode(resp, http.StatusOK) {
+ return blobClientRenewLeaseResponse{}, runtime.NewResponseError(resp)
}
- return client.renameHandleResponse(resp)
+ return client.renewLeaseHandleResponse(resp)
}
-// renameCreateRequest creates the Rename request.
-func (client *blobClient) renameCreateRequest(ctx context.Context, renameSource string, blobRenameOptions *BlobRenameOptions, directoryHTTPHeaders *DirectoryHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+// renewLeaseCreateRequest creates the RenewLease request.
+func (client *blobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, blobClientRenewLeaseOptions *blobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- if blobRenameOptions != nil && blobRenameOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobRenameOptions.Timeout), 10))
- }
- if client.pathRenameMode != nil {
- reqQP.Set("mode", string(*client.pathRenameMode))
+ reqQP.Set("comp", "lease")
+ if blobClientRenewLeaseOptions != nil && blobClientRenewLeaseOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientRenewLeaseOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-rename-source", renameSource)
- if blobRenameOptions != nil && blobRenameOptions.DirectoryProperties != nil {
- req.Raw().Header.Set("x-ms-properties", *blobRenameOptions.DirectoryProperties)
- }
- if blobRenameOptions != nil && blobRenameOptions.PosixPermissions != nil {
- req.Raw().Header.Set("x-ms-permissions", *blobRenameOptions.PosixPermissions)
- }
- if blobRenameOptions != nil && blobRenameOptions.PosixUmask != nil {
- req.Raw().Header.Set("x-ms-umask", *blobRenameOptions.PosixUmask)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.CacheControl != nil {
- req.Raw().Header.Set("x-ms-cache-control", *directoryHTTPHeaders.CacheControl)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.ContentType != nil {
- req.Raw().Header.Set("x-ms-content-type", *directoryHTTPHeaders.ContentType)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.ContentEncoding != nil {
- req.Raw().Header.Set("x-ms-content-encoding", *directoryHTTPHeaders.ContentEncoding)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.ContentLanguage != nil {
- req.Raw().Header.Set("x-ms-content-language", *directoryHTTPHeaders.ContentLanguage)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.ContentDisposition != nil {
- req.Raw().Header.Set("x-ms-content-disposition", *directoryHTTPHeaders.ContentDisposition)
- }
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
- }
- if blobRenameOptions != nil && blobRenameOptions.SourceLeaseID != nil {
- req.Raw().Header.Set("x-ms-source-lease-id", *blobRenameOptions.SourceLeaseID)
- }
+ req.Raw().Header.Set("x-ms-lease-action", "renew")
+ req.Raw().Header.Set("x-ms-lease-id", leaseID)
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
}
@@ -1884,39 +1954,33 @@ func (client *blobClient) renameCreateRequest(ctx context.Context, renameSource
if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
}
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
- req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123))
- }
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
- req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123))
- }
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
- req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch)
- }
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
- req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch)
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobRenameOptions != nil && blobRenameOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobRenameOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientRenewLeaseOptions != nil && blobClientRenewLeaseOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientRenewLeaseOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
-// renameHandleResponse handles the Rename response.
-func (client *blobClient) renameHandleResponse(resp *http.Response) (BlobRenameResponse, error) {
- result := BlobRenameResponse{RawResponse: resp}
+// renewLeaseHandleResponse handles the RenewLease response.
+func (client *blobClient) renewLeaseHandleResponse(resp *http.Response) (blobClientRenewLeaseResponse, error) {
+ result := blobClientRenewLeaseResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobRenameResponse{}, err
+ return blobClientRenewLeaseResponse{}, err
}
result.LastModified = &lastModified
}
+ if val := resp.Header.Get("x-ms-lease-id"); val != "" {
+ result.LeaseID = &val
+ }
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -1926,93 +1990,72 @@ func (client *blobClient) renameHandleResponse(resp *http.Response) (BlobRenameR
if val := resp.Header.Get("x-ms-version"); val != "" {
result.Version = &val
}
- if val := resp.Header.Get("Content-Length"); val != "" {
- contentLength, err := strconv.ParseInt(val, 10, 64)
- if err != nil {
- return BlobRenameResponse{}, err
- }
- result.ContentLength = &contentLength
- }
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobRenameResponse{}, err
+ return blobClientRenewLeaseResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) RenewLease(ctx context.Context, leaseID string, blobRenewLeaseOptions *BlobRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobRenewLeaseResponse, error) {
- req, err := client.renewLeaseCreateRequest(ctx, leaseID, blobRenewLeaseOptions, modifiedAccessConditions)
+// SetExpiry - Sets the time a blob will expire and be deleted.
+// If the operation fails it returns an *azcore.ResponseError type.
+// expiryOptions - Required. Indicates mode of the expiry time
+// options - blobClientSetExpiryOptions contains the optional parameters for the blobClient.SetExpiry method.
+func (client *blobClient) SetExpiry(ctx context.Context, expiryOptions BlobExpiryOptions, options *blobClientSetExpiryOptions) (blobClientSetExpiryResponse, error) {
+ req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options)
if err != nil {
- return BlobRenewLeaseResponse{}, err
+ return blobClientSetExpiryResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobRenewLeaseResponse{}, err
+ return blobClientSetExpiryResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlobRenewLeaseResponse{}, runtime.NewResponseError(resp)
+ return blobClientSetExpiryResponse{}, runtime.NewResponseError(resp)
}
- return client.renewLeaseHandleResponse(resp)
+ return client.setExpiryHandleResponse(resp)
}
-// renewLeaseCreateRequest creates the RenewLease request.
-func (client *blobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, blobRenewLeaseOptions *BlobRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+// setExpiryCreateRequest creates the SetExpiry request.
+func (client *blobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions BlobExpiryOptions, options *blobClientSetExpiryOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("comp", "lease")
- if blobRenewLeaseOptions != nil && blobRenewLeaseOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobRenewLeaseOptions.Timeout), 10))
+ reqQP.Set("comp", "expiry")
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-lease-action", "renew")
- req.Raw().Header.Set("x-ms-lease-id", leaseID)
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
- req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobRenewLeaseOptions != nil && blobRenewLeaseOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobRenewLeaseOptions.RequestID)
+ req.Raw().Header.Set("x-ms-expiry-option", string(expiryOptions))
+ if options != nil && options.ExpiresOn != nil {
+ req.Raw().Header.Set("x-ms-expiry-time", *options.ExpiresOn)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
-// renewLeaseHandleResponse handles the RenewLease response.
-func (client *blobClient) renewLeaseHandleResponse(resp *http.Response) (BlobRenewLeaseResponse, error) {
- result := BlobRenewLeaseResponse{RawResponse: resp}
+// setExpiryHandleResponse handles the SetExpiry response.
+func (client *blobClient) setExpiryHandleResponse(resp *http.Response) (blobClientSetExpiryResponse, error) {
+ result := blobClientSetExpiryResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobRenewLeaseResponse{}, err
+ return blobClientSetExpiryResponse{}, err
}
result.LastModified = &lastModified
}
- if val := resp.Header.Get("x-ms-lease-id"); val != "" {
- result.LeaseID = &val
- }
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -2025,56 +2068,70 @@ func (client *blobClient) renewLeaseHandleResponse(resp *http.Response) (BlobRen
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobRenewLeaseResponse{}, err
+ return blobClientSetExpiryResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// SetAccessControl - Set the owner, group, permissions, or access control list for a blob.
-// If the operation fails it returns the *DataLakeStorageError error type.
-func (client *blobClient) SetAccessControl(ctx context.Context, blobSetAccessControlOptions *BlobSetAccessControlOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobSetAccessControlResponse, error) {
- req, err := client.setAccessControlCreateRequest(ctx, blobSetAccessControlOptions, leaseAccessConditions, modifiedAccessConditions)
+// SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobClientSetHTTPHeadersOptions - blobClientSetHTTPHeadersOptions contains the optional parameters for the blobClient.SetHTTPHeaders
+// method.
+// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) SetHTTPHeaders(ctx context.Context, blobClientSetHTTPHeadersOptions *blobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetHTTPHeadersResponse, error) {
+ req, err := client.setHTTPHeadersCreateRequest(ctx, blobClientSetHTTPHeadersOptions, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions)
if err != nil {
- return BlobSetAccessControlResponse{}, err
+ return blobClientSetHTTPHeadersResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobSetAccessControlResponse{}, err
+ return blobClientSetHTTPHeadersResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlobSetAccessControlResponse{}, runtime.NewResponseError(resp)
+ return blobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp)
}
- return client.setAccessControlHandleResponse(resp)
+ return client.setHTTPHeadersHandleResponse(resp)
}
-// setAccessControlCreateRequest creates the SetAccessControl request.
-func (client *blobClient) setAccessControlCreateRequest(ctx context.Context, blobSetAccessControlOptions *BlobSetAccessControlOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPatch, client.con.Endpoint())
+// setHTTPHeadersCreateRequest creates the SetHTTPHeaders request.
+func (client *blobClient) setHTTPHeadersCreateRequest(ctx context.Context, blobClientSetHTTPHeadersOptions *blobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("action", "setAccessControl")
- if blobSetAccessControlOptions != nil && blobSetAccessControlOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobSetAccessControlOptions.Timeout), 10))
+ reqQP.Set("comp", "properties")
+ if blobClientSetHTTPHeadersOptions != nil && blobClientSetHTTPHeadersOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetHTTPHeadersOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
+ req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl)
}
- if blobSetAccessControlOptions != nil && blobSetAccessControlOptions.Owner != nil {
- req.Raw().Header.Set("x-ms-owner", *blobSetAccessControlOptions.Owner)
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
+ req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType)
}
- if blobSetAccessControlOptions != nil && blobSetAccessControlOptions.Group != nil {
- req.Raw().Header.Set("x-ms-group", *blobSetAccessControlOptions.Group)
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
+ req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5))
}
- if blobSetAccessControlOptions != nil && blobSetAccessControlOptions.PosixPermissions != nil {
- req.Raw().Header.Set("x-ms-permissions", *blobSetAccessControlOptions.PosixPermissions)
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
+ req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding)
}
- if blobSetAccessControlOptions != nil && blobSetAccessControlOptions.PosixACL != nil {
- req.Raw().Header.Set("x-ms-acl", *blobSetAccessControlOptions.PosixACL)
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
+ req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage)
+ }
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
@@ -2082,103 +2139,111 @@ func (client *blobClient) setAccessControlCreateRequest(ctx context.Context, blo
if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
+ req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition)
}
- if blobSetAccessControlOptions != nil && blobSetAccessControlOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobSetAccessControlOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientSetHTTPHeadersOptions != nil && blobClientSetHTTPHeadersOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetHTTPHeadersOptions.RequestID)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
-// setAccessControlHandleResponse handles the SetAccessControl response.
-func (client *blobClient) setAccessControlHandleResponse(resp *http.Response) (BlobSetAccessControlResponse, error) {
- result := BlobSetAccessControlResponse{RawResponse: resp}
- if val := resp.Header.Get("Date"); val != "" {
- date, err := time.Parse(time.RFC1123, val)
- if err != nil {
- return BlobSetAccessControlResponse{}, err
- }
- result.Date = &date
- }
+// setHTTPHeadersHandleResponse handles the SetHTTPHeaders response.
+func (client *blobClient) setHTTPHeadersHandleResponse(resp *http.Response) (blobClientSetHTTPHeadersResponse, error) {
+ result := blobClientSetHTTPHeadersResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobSetAccessControlResponse{}, err
+ return blobClientSetHTTPHeadersResponse{}, err
}
result.LastModified = &lastModified
}
+ if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
+ blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return blobClientSetHTTPHeadersResponse{}, err
+ }
+ result.BlobSequenceNumber = &blobSequenceNumber
+ }
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
if val := resp.Header.Get("x-ms-request-id"); val != "" {
result.RequestID = &val
}
if val := resp.Header.Get("x-ms-version"); val != "" {
result.Version = &val
}
+ if val := resp.Header.Get("Date"); val != "" {
+ date, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return blobClientSetHTTPHeadersResponse{}, err
+ }
+ result.Date = &date
+ }
return result, nil
}
-// SetExpiry - Sets the time a blob will expire and be deleted.
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) SetExpiry(ctx context.Context, expiryOptions BlobExpiryOptions, options *BlobSetExpiryOptions) (BlobSetExpiryResponse, error) {
- req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options)
+// SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobClientSetImmutabilityPolicyOptions - blobClientSetImmutabilityPolicyOptions contains the optional parameters for the
+// blobClient.SetImmutabilityPolicy method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) SetImmutabilityPolicy(ctx context.Context, blobClientSetImmutabilityPolicyOptions *blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetImmutabilityPolicyResponse, error) {
+ req, err := client.setImmutabilityPolicyCreateRequest(ctx, blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions)
if err != nil {
- return BlobSetExpiryResponse{}, err
+ return blobClientSetImmutabilityPolicyResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobSetExpiryResponse{}, err
+ return blobClientSetImmutabilityPolicyResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlobSetExpiryResponse{}, runtime.NewResponseError(resp)
+ return blobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp)
}
- return client.setExpiryHandleResponse(resp)
+ return client.setImmutabilityPolicyHandleResponse(resp)
}
-// setExpiryCreateRequest creates the SetExpiry request.
-func (client *blobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions BlobExpiryOptions, options *BlobSetExpiryOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+// setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request.
+func (client *blobClient) setImmutabilityPolicyCreateRequest(ctx context.Context, blobClientSetImmutabilityPolicyOptions *blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("comp", "expiry")
- if options != nil && options.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ reqQP.Set("comp", "immutabilityPolicies")
+ if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetImmutabilityPolicyOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if options != nil && options.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetImmutabilityPolicyOptions.RequestID)
}
- req.Raw().Header.Set("x-ms-expiry-option", string(expiryOptions))
- if options != nil && options.ExpiresOn != nil {
- req.Raw().Header.Set("x-ms-expiry-time", *options.ExpiresOn)
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
+ }
+ if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123))
+ }
+ if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyMode != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyMode))
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
-// setExpiryHandleResponse handles the SetExpiry response.
-func (client *blobClient) setExpiryHandleResponse(resp *http.Response) (BlobSetExpiryResponse, error) {
- result := BlobSetExpiryResponse{RawResponse: resp}
- if val := resp.Header.Get("ETag"); val != "" {
- result.ETag = &val
- }
- if val := resp.Header.Get("Last-Modified"); val != "" {
- lastModified, err := time.Parse(time.RFC1123, val)
- if err != nil {
- return BlobSetExpiryResponse{}, err
- }
- result.LastModified = &lastModified
- }
+// setImmutabilityPolicyHandleResponse handles the SetImmutabilityPolicy response.
+func (client *blobClient) setImmutabilityPolicyHandleResponse(resp *http.Response) (blobClientSetImmutabilityPolicyResponse, error) {
+ result := blobClientSetImmutabilityPolicyResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -2191,106 +2256,66 @@ func (client *blobClient) setExpiryHandleResponse(resp *http.Response) (BlobSetE
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobSetExpiryResponse{}, err
+ return blobClientSetImmutabilityPolicyResponse{}, err
}
result.Date = &date
}
+ if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" {
+ immutabilityPolicyExpiry, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return blobClientSetImmutabilityPolicyResponse{}, err
+ }
+ result.ImmutabilityPolicyExpiry = &immutabilityPolicyExpiry
+ }
+ if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" {
+ result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val)
+ }
return result, nil
}
-// SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) SetHTTPHeaders(ctx context.Context, blobSetHTTPHeadersOptions *BlobSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobSetHTTPHeadersResponse, error) {
- req, err := client.setHTTPHeadersCreateRequest(ctx, blobSetHTTPHeadersOptions, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions)
+// SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob.
+// If the operation fails it returns an *azcore.ResponseError type.
+// legalHold - Specified if a legal hold should be set on the blob.
+// options - blobClientSetLegalHoldOptions contains the optional parameters for the blobClient.SetLegalHold method.
+func (client *blobClient) SetLegalHold(ctx context.Context, legalHold bool, options *blobClientSetLegalHoldOptions) (blobClientSetLegalHoldResponse, error) {
+ req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options)
if err != nil {
- return BlobSetHTTPHeadersResponse{}, err
+ return blobClientSetLegalHoldResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobSetHTTPHeadersResponse{}, err
+ return blobClientSetLegalHoldResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlobSetHTTPHeadersResponse{}, runtime.NewResponseError(resp)
+ return blobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp)
}
- return client.setHTTPHeadersHandleResponse(resp)
+ return client.setLegalHoldHandleResponse(resp)
}
-// setHTTPHeadersCreateRequest creates the SetHTTPHeaders request.
-func (client *blobClient) setHTTPHeadersCreateRequest(ctx context.Context, blobSetHTTPHeadersOptions *BlobSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+// setLegalHoldCreateRequest creates the SetLegalHold request.
+func (client *blobClient) setLegalHoldCreateRequest(ctx context.Context, legalHold bool, options *blobClientSetLegalHoldOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("comp", "properties")
- if blobSetHTTPHeadersOptions != nil && blobSetHTTPHeadersOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobSetHTTPHeadersOptions.Timeout), 10))
+ reqQP.Set("comp", "legalhold")
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
- req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl)
- }
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
- req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType)
- }
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
- req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5))
- }
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
- req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding)
- }
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
- req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage)
- }
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
- req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
- }
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
- req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition)
- }
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobSetHTTPHeadersOptions != nil && blobSetHTTPHeadersOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobSetHTTPHeadersOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
+ req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(legalHold))
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
-// setHTTPHeadersHandleResponse handles the SetHTTPHeaders response.
-func (client *blobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobSetHTTPHeadersResponse, error) {
- result := BlobSetHTTPHeadersResponse{RawResponse: resp}
- if val := resp.Header.Get("ETag"); val != "" {
- result.ETag = &val
- }
- if val := resp.Header.Get("Last-Modified"); val != "" {
- lastModified, err := time.Parse(time.RFC1123, val)
- if err != nil {
- return BlobSetHTTPHeadersResponse{}, err
- }
- result.LastModified = &lastModified
- }
- if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
- blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
- if err != nil {
- return BlobSetHTTPHeadersResponse{}, err
- }
- result.BlobSequenceNumber = &blobSequenceNumber
- }
+// setLegalHoldHandleResponse handles the SetLegalHold response.
+func (client *blobClient) setLegalHoldHandleResponse(resp *http.Response) (blobClientSetLegalHoldResponse, error) {
+ result := blobClientSetLegalHoldResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -2303,44 +2328,58 @@ func (client *blobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobSetHTTPHeadersResponse{}, err
+ return blobClientSetLegalHoldResponse{}, err
}
result.Date = &date
}
+ if val := resp.Header.Get("x-ms-legal-hold"); val != "" {
+ legalHold, err := strconv.ParseBool(val)
+ if err != nil {
+ return blobClientSetLegalHoldResponse{}, err
+ }
+ result.LegalHold = &legalHold
+ }
return result, nil
}
-// SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value pairs
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) SetMetadata(ctx context.Context, blobSetMetadataOptions *BlobSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobSetMetadataResponse, error) {
- req, err := client.setMetadataCreateRequest(ctx, blobSetMetadataOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+// SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value
+// pairs
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobClientSetMetadataOptions - blobClientSetMetadataOptions contains the optional parameters for the blobClient.SetMetadata
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) SetMetadata(ctx context.Context, blobClientSetMetadataOptions *blobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetMetadataResponse, error) {
+ req, err := client.setMetadataCreateRequest(ctx, blobClientSetMetadataOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
if err != nil {
- return BlobSetMetadataResponse{}, err
+ return blobClientSetMetadataResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobSetMetadataResponse{}, err
+ return blobClientSetMetadataResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlobSetMetadataResponse{}, runtime.NewResponseError(resp)
+ return blobClientSetMetadataResponse{}, runtime.NewResponseError(resp)
}
return client.setMetadataHandleResponse(resp)
}
// setMetadataCreateRequest creates the SetMetadata request.
-func (client *blobClient) setMetadataCreateRequest(ctx context.Context, blobSetMetadataOptions *BlobSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blobClient) setMetadataCreateRequest(ctx context.Context, blobClientSetMetadataOptions *blobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "metadata")
- if blobSetMetadataOptions != nil && blobSetMetadataOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobSetMetadataOptions.Timeout), 10))
+ if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetMetadataOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if blobSetMetadataOptions != nil && blobSetMetadataOptions.Metadata != nil {
- for k, v := range blobSetMetadataOptions.Metadata {
+ if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.Metadata != nil {
+ for k, v := range blobClientSetMetadataOptions.Metadata {
req.Raw().Header.Set("x-ms-meta-"+k, v)
}
}
@@ -2354,7 +2393,7 @@ func (client *blobClient) setMetadataCreateRequest(ctx context.Context, blobSetM
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
@@ -2374,24 +2413,24 @@ func (client *blobClient) setMetadataCreateRequest(ctx context.Context, blobSetM
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobSetMetadataOptions != nil && blobSetMetadataOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobSetMetadataOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetMetadataOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// setMetadataHandleResponse handles the SetMetadata response.
-func (client *blobClient) setMetadataHandleResponse(resp *http.Response) (BlobSetMetadataResponse, error) {
- result := BlobSetMetadataResponse{RawResponse: resp}
+func (client *blobClient) setMetadataHandleResponse(resp *http.Response) (blobClientSetMetadataResponse, error) {
+ result := blobClientSetMetadataResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobSetMetadataResponse{}, err
+ return blobClientSetMetadataResponse{}, err
}
result.LastModified = &lastModified
}
@@ -2410,14 +2449,14 @@ func (client *blobClient) setMetadataHandleResponse(resp *http.Response) (BlobSe
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobSetMetadataResponse{}, err
+ return blobClientSetMetadataResponse{}, err
}
result.Date = &date
}
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return BlobSetMetadataResponse{}, err
+ return blobClientSetMetadataResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
@@ -2431,60 +2470,66 @@ func (client *blobClient) setMetadataHandleResponse(resp *http.Response) (BlobSe
}
// SetTags - The Set Tags operation enables users to set tags on a blob.
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) SetTags(ctx context.Context, blobSetTagsOptions *BlobSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobSetTagsResponse, error) {
- req, err := client.setTagsCreateRequest(ctx, blobSetTagsOptions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobClientSetTagsOptions - blobClientSetTagsOptions contains the optional parameters for the blobClient.SetTags method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+func (client *blobClient) SetTags(ctx context.Context, blobClientSetTagsOptions *blobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientSetTagsResponse, error) {
+ req, err := client.setTagsCreateRequest(ctx, blobClientSetTagsOptions, modifiedAccessConditions, leaseAccessConditions)
if err != nil {
- return BlobSetTagsResponse{}, err
+ return blobClientSetTagsResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobSetTagsResponse{}, err
+ return blobClientSetTagsResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusNoContent) {
- return BlobSetTagsResponse{}, runtime.NewResponseError(resp)
+ return blobClientSetTagsResponse{}, runtime.NewResponseError(resp)
}
return client.setTagsHandleResponse(resp)
}
// setTagsCreateRequest creates the SetTags request.
-func (client *blobClient) setTagsCreateRequest(ctx context.Context, blobSetTagsOptions *BlobSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blobClient) setTagsCreateRequest(ctx context.Context, blobClientSetTagsOptions *blobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "tags")
- if blobSetTagsOptions != nil && blobSetTagsOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobSetTagsOptions.Timeout), 10))
+ if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetTagsOptions.Timeout), 10))
}
- if blobSetTagsOptions != nil && blobSetTagsOptions.VersionID != nil {
- reqQP.Set("versionid", *blobSetTagsOptions.VersionID)
+ if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.VersionID != nil {
+ reqQP.Set("versionid", *blobClientSetTagsOptions.VersionID)
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobSetTagsOptions != nil && blobSetTagsOptions.TransactionalContentMD5 != nil {
- req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blobSetTagsOptions.TransactionalContentMD5))
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.TransactionalContentMD5 != nil {
+ req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blobClientSetTagsOptions.TransactionalContentMD5))
}
- if blobSetTagsOptions != nil && blobSetTagsOptions.TransactionalContentCRC64 != nil {
- req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blobSetTagsOptions.TransactionalContentCRC64))
+ if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.TransactionalContentCRC64 != nil {
+ req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blobClientSetTagsOptions.TransactionalContentCRC64))
}
- if blobSetTagsOptions != nil && blobSetTagsOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobSetTagsOptions.RequestID)
+ if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetTagsOptions.RequestID)
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
+ }
req.Raw().Header.Set("Accept", "application/xml")
- if blobSetTagsOptions != nil && blobSetTagsOptions.Tags != nil {
- return req, runtime.MarshalAsXML(req, *blobSetTagsOptions.Tags)
+ if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.Tags != nil {
+ return req, runtime.MarshalAsXML(req, *blobClientSetTagsOptions.Tags)
}
return req, nil
}
// setTagsHandleResponse handles the SetTags response.
-func (client *blobClient) setTagsHandleResponse(resp *http.Response) (BlobSetTagsResponse, error) {
- result := BlobSetTagsResponse{RawResponse: resp}
+func (client *blobClient) setTagsHandleResponse(resp *http.Response) (blobClientSetTagsResponse, error) {
+ result := blobClientSetTagsResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -2497,58 +2542,62 @@ func (client *blobClient) setTagsHandleResponse(resp *http.Response) (BlobSetTag
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobSetTagsResponse{}, err
+ return blobClientSetTagsResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// SetTier - The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account and on a block blob in
-// a blob storage account (locally redundant storage only). A
-// premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type.
-// This operation does not update the blob's ETag.
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) SetTier(ctx context.Context, tier AccessTier, blobSetTierOptions *BlobSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobSetTierResponse, error) {
- req, err := client.setTierCreateRequest(ctx, tier, blobSetTierOptions, leaseAccessConditions, modifiedAccessConditions)
+// SetTier - The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage
+// account and on a block blob in a blob storage account (locally redundant storage only). A
+// premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive
+// storage type. This operation does not update the blob's ETag.
+// If the operation fails it returns an *azcore.ResponseError type.
+// tier - Indicates the tier to be set on the blob.
+// blobClientSetTierOptions - blobClientSetTierOptions contains the optional parameters for the blobClient.SetTier method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blobClient) SetTier(ctx context.Context, tier AccessTier, blobClientSetTierOptions *blobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetTierResponse, error) {
+ req, err := client.setTierCreateRequest(ctx, tier, blobClientSetTierOptions, leaseAccessConditions, modifiedAccessConditions)
if err != nil {
- return BlobSetTierResponse{}, err
+ return blobClientSetTierResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobSetTierResponse{}, err
+ return blobClientSetTierResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
- return BlobSetTierResponse{}, runtime.NewResponseError(resp)
+ return blobClientSetTierResponse{}, runtime.NewResponseError(resp)
}
return client.setTierHandleResponse(resp)
}
// setTierCreateRequest creates the SetTier request.
-func (client *blobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, blobSetTierOptions *BlobSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, blobClientSetTierOptions *blobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "tier")
- if blobSetTierOptions != nil && blobSetTierOptions.Snapshot != nil {
- reqQP.Set("snapshot", *blobSetTierOptions.Snapshot)
+ if blobClientSetTierOptions != nil && blobClientSetTierOptions.Snapshot != nil {
+ reqQP.Set("snapshot", *blobClientSetTierOptions.Snapshot)
}
- if blobSetTierOptions != nil && blobSetTierOptions.VersionID != nil {
- reqQP.Set("versionid", *blobSetTierOptions.VersionID)
+ if blobClientSetTierOptions != nil && blobClientSetTierOptions.VersionID != nil {
+ reqQP.Set("versionid", *blobClientSetTierOptions.VersionID)
}
- if blobSetTierOptions != nil && blobSetTierOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobSetTierOptions.Timeout), 10))
+ if blobClientSetTierOptions != nil && blobClientSetTierOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetTierOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-access-tier", string(tier))
- if blobSetTierOptions != nil && blobSetTierOptions.RehydratePriority != nil {
- req.Raw().Header.Set("x-ms-rehydrate-priority", string(*blobSetTierOptions.RehydratePriority))
+ if blobClientSetTierOptions != nil && blobClientSetTierOptions.RehydratePriority != nil {
+ req.Raw().Header.Set("x-ms-rehydrate-priority", string(*blobClientSetTierOptions.RehydratePriority))
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobSetTierOptions != nil && blobSetTierOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobSetTierOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientSetTierOptions != nil && blobClientSetTierOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetTierOptions.RequestID)
}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
@@ -2561,8 +2610,8 @@ func (client *blobClient) setTierCreateRequest(ctx context.Context, tier AccessT
}
// setTierHandleResponse handles the SetTier response.
-func (client *blobClient) setTierHandleResponse(resp *http.Response) (BlobSetTierResponse, error) {
- result := BlobSetTierResponse{RawResponse: resp}
+func (client *blobClient) setTierHandleResponse(resp *http.Response) (blobClientSetTierResponse, error) {
+ result := blobClientSetTierResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -2576,43 +2625,52 @@ func (client *blobClient) setTierHandleResponse(resp *http.Response) (BlobSetTie
}
// StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob.
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) StartCopyFromURL(ctx context.Context, copySource string, blobStartCopyFromURLOptions *BlobStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobStartCopyFromURLResponse, error) {
- req, err := client.startCopyFromURLCreateRequest(ctx, copySource, blobStartCopyFromURLOptions, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
+// a page blob snapshot. The value should be URL-encoded as it would appear in a request
+// URI. The source blob must either be public or must be authenticated via a shared access signature.
+// blobClientStartCopyFromURLOptions - blobClientStartCopyFromURLOptions contains the optional parameters for the blobClient.StartCopyFromURL
+// method.
+// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+func (client *blobClient) StartCopyFromURL(ctx context.Context, copySource string, blobClientStartCopyFromURLOptions *blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientStartCopyFromURLResponse, error) {
+ req, err := client.startCopyFromURLCreateRequest(ctx, copySource, blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions)
if err != nil {
- return BlobStartCopyFromURLResponse{}, err
+ return blobClientStartCopyFromURLResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobStartCopyFromURLResponse{}, err
+ return blobClientStartCopyFromURLResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusAccepted) {
- return BlobStartCopyFromURLResponse{}, runtime.NewResponseError(resp)
+ return blobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp)
}
return client.startCopyFromURLHandleResponse(resp)
}
// startCopyFromURLCreateRequest creates the StartCopyFromURL request.
-func (client *blobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, blobStartCopyFromURLOptions *BlobStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, blobClientStartCopyFromURLOptions *blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- if blobStartCopyFromURLOptions != nil && blobStartCopyFromURLOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blobStartCopyFromURLOptions.Timeout), 10))
+ if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientStartCopyFromURLOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if blobStartCopyFromURLOptions != nil && blobStartCopyFromURLOptions.Metadata != nil {
- for k, v := range blobStartCopyFromURLOptions.Metadata {
+ if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Metadata != nil {
+ for k, v := range blobClientStartCopyFromURLOptions.Metadata {
req.Raw().Header.Set("x-ms-meta-"+k, v)
}
}
- if blobStartCopyFromURLOptions != nil && blobStartCopyFromURLOptions.Tier != nil {
- req.Raw().Header.Set("x-ms-access-tier", string(*blobStartCopyFromURLOptions.Tier))
+ if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Tier != nil {
+ req.Raw().Header.Set("x-ms-access-tier", string(*blobClientStartCopyFromURLOptions.Tier))
}
- if blobStartCopyFromURLOptions != nil && blobStartCopyFromURLOptions.RehydratePriority != nil {
- req.Raw().Header.Set("x-ms-rehydrate-priority", string(*blobStartCopyFromURLOptions.RehydratePriority))
+ if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.RehydratePriority != nil {
+ req.Raw().Header.Set("x-ms-rehydrate-priority", string(*blobClientStartCopyFromURLOptions.RehydratePriority))
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123))
@@ -2648,30 +2706,39 @@ func (client *blobClient) startCopyFromURLCreateRequest(ctx context.Context, cop
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blobStartCopyFromURLOptions != nil && blobStartCopyFromURLOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blobStartCopyFromURLOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blobClientStartCopyFromURLOptions.RequestID)
+ }
+ if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.BlobTagsString != nil {
+ req.Raw().Header.Set("x-ms-tags", *blobClientStartCopyFromURLOptions.BlobTagsString)
+ }
+ if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.SealBlob != nil {
+ req.Raw().Header.Set("x-ms-seal-blob", strconv.FormatBool(*blobClientStartCopyFromURLOptions.SealBlob))
+ }
+ if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.ImmutabilityPolicyExpiry != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientStartCopyFromURLOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123))
}
- if blobStartCopyFromURLOptions != nil && blobStartCopyFromURLOptions.BlobTagsString != nil {
- req.Raw().Header.Set("x-ms-tags", *blobStartCopyFromURLOptions.BlobTagsString)
+ if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.ImmutabilityPolicyMode != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientStartCopyFromURLOptions.ImmutabilityPolicyMode))
}
- if blobStartCopyFromURLOptions != nil && blobStartCopyFromURLOptions.SealBlob != nil {
- req.Raw().Header.Set("x-ms-seal-blob", strconv.FormatBool(*blobStartCopyFromURLOptions.SealBlob))
+ if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.LegalHold != nil {
+ req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blobClientStartCopyFromURLOptions.LegalHold))
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// startCopyFromURLHandleResponse handles the StartCopyFromURL response.
-func (client *blobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobStartCopyFromURLResponse, error) {
- result := BlobStartCopyFromURLResponse{RawResponse: resp}
+func (client *blobClient) startCopyFromURLHandleResponse(resp *http.Response) (blobClientStartCopyFromURLResponse, error) {
+ result := blobClientStartCopyFromURLResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobStartCopyFromURLResponse{}, err
+ return blobClientStartCopyFromURLResponse{}, err
}
result.LastModified = &lastModified
}
@@ -2690,7 +2757,7 @@ func (client *blobClient) startCopyFromURLHandleResponse(resp *http.Response) (B
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobStartCopyFromURLResponse{}, err
+ return blobClientStartCopyFromURLResponse{}, err
}
result.Date = &date
}
@@ -2704,25 +2771,26 @@ func (client *blobClient) startCopyFromURLHandleResponse(resp *http.Response) (B
}
// Undelete - Undelete a blob that was previously soft deleted
-// If the operation fails it returns the *StorageError error type.
-func (client *blobClient) Undelete(ctx context.Context, options *BlobUndeleteOptions) (BlobUndeleteResponse, error) {
+// If the operation fails it returns an *azcore.ResponseError type.
+// options - blobClientUndeleteOptions contains the optional parameters for the blobClient.Undelete method.
+func (client *blobClient) Undelete(ctx context.Context, options *blobClientUndeleteOptions) (blobClientUndeleteResponse, error) {
req, err := client.undeleteCreateRequest(ctx, options)
if err != nil {
- return BlobUndeleteResponse{}, err
+ return blobClientUndeleteResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlobUndeleteResponse{}, err
+ return blobClientUndeleteResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlobUndeleteResponse{}, runtime.NewResponseError(resp)
+ return blobClientUndeleteResponse{}, runtime.NewResponseError(resp)
}
return client.undeleteHandleResponse(resp)
}
// undeleteCreateRequest creates the Undelete request.
-func (client *blobClient) undeleteCreateRequest(ctx context.Context, options *BlobUndeleteOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blobClient) undeleteCreateRequest(ctx context.Context, options *blobClientUndeleteOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
@@ -2732,7 +2800,7 @@ func (client *blobClient) undeleteCreateRequest(ctx context.Context, options *Bl
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
@@ -2741,8 +2809,8 @@ func (client *blobClient) undeleteCreateRequest(ctx context.Context, options *Bl
}
// undeleteHandleResponse handles the Undelete response.
-func (client *blobClient) undeleteHandleResponse(resp *http.Response) (BlobUndeleteResponse, error) {
- result := BlobUndeleteResponse{RawResponse: resp}
+func (client *blobClient) undeleteHandleResponse(resp *http.Response) (blobClientUndeleteResponse, error) {
+ result := blobClientUndeleteResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -2755,7 +2823,7 @@ func (client *blobClient) undeleteHandleResponse(resp *http.Response) (BlobUndel
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlobUndeleteResponse{}, err
+ return blobClientUndeleteResponse{}, err
}
result.Date = &date
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go
index c1d93782a26..3f78a28aa40 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
@@ -11,52 +11,71 @@ package azblob
import (
"context"
"encoding/base64"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"io"
"net/http"
"strconv"
"time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
type blockBlobClient struct {
- con *connection
+ endpoint string
+ pl runtime.Pipeline
}
-// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. In order to be written as
-// part of a blob, a block must have been successfully written to the
-// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed, then committing
-// the new and existing blocks together. You can do
-// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit the most recently uploaded
-// version of the block, whichever list it may
+// newBlockBlobClient creates a new instance of blockBlobClient with the specified values.
+// endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
+// pl - the pipeline used for sending requests and handling responses.
+func newBlockBlobClient(endpoint string, pl runtime.Pipeline) *blockBlobClient {
+ client := &blockBlobClient{
+ endpoint: endpoint,
+ pl: pl,
+ }
+ return client
+}
+
+// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob.
+// In order to be written as part of a blob, a block must have been successfully written to the
+// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that
+// have changed, then committing the new and existing blocks together. You can do
+// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit
+// the most recently uploaded version of the block, whichever list it may
// belong to.
-// If the operation fails it returns the *StorageError error type.
-func (client *blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, blockBlobCommitBlockListOptions *BlockBlobCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobCommitBlockListResponse, error) {
- req, err := client.commitBlockListCreateRequest(ctx, blocks, blockBlobCommitBlockListOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// blocks - Blob Blocks.
+// blockBlobClientCommitBlockListOptions - blockBlobClientCommitBlockListOptions contains the optional parameters for the
+// blockBlobClient.CommitBlockList method.
+// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, blockBlobClientCommitBlockListOptions *blockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientCommitBlockListResponse, error) {
+ req, err := client.commitBlockListCreateRequest(ctx, blocks, blockBlobClientCommitBlockListOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
if err != nil {
- return BlockBlobCommitBlockListResponse{}, err
+ return blockBlobClientCommitBlockListResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlockBlobCommitBlockListResponse{}, err
+ return blockBlobClientCommitBlockListResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return BlockBlobCommitBlockListResponse{}, runtime.NewResponseError(resp)
+ return blockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp)
}
return client.commitBlockListHandleResponse(resp)
}
// commitBlockListCreateRequest creates the CommitBlockList request.
-func (client *blockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, blockBlobCommitBlockListOptions *BlockBlobCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, blockBlobClientCommitBlockListOptions *blockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "blocklist")
- if blockBlobCommitBlockListOptions != nil && blockBlobCommitBlockListOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobCommitBlockListOptions.Timeout), 10))
+ if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientCommitBlockListOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
@@ -74,14 +93,14 @@ func (client *blockBlobClient) commitBlockListCreateRequest(ctx context.Context,
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5))
}
- if blockBlobCommitBlockListOptions != nil && blockBlobCommitBlockListOptions.TransactionalContentMD5 != nil {
- req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobCommitBlockListOptions.TransactionalContentMD5))
+ if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.TransactionalContentMD5 != nil {
+ req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientCommitBlockListOptions.TransactionalContentMD5))
}
- if blockBlobCommitBlockListOptions != nil && blockBlobCommitBlockListOptions.TransactionalContentCRC64 != nil {
- req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blockBlobCommitBlockListOptions.TransactionalContentCRC64))
+ if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.TransactionalContentCRC64 != nil {
+ req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientCommitBlockListOptions.TransactionalContentCRC64))
}
- if blockBlobCommitBlockListOptions != nil && blockBlobCommitBlockListOptions.Metadata != nil {
- for k, v := range blockBlobCommitBlockListOptions.Metadata {
+ if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Metadata != nil {
+ for k, v := range blockBlobClientCommitBlockListOptions.Metadata {
req.Raw().Header.Set("x-ms-meta-"+k, v)
}
}
@@ -98,13 +117,13 @@ func (client *blockBlobClient) commitBlockListCreateRequest(ctx context.Context,
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
}
- if blockBlobCommitBlockListOptions != nil && blockBlobCommitBlockListOptions.Tier != nil {
- req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobCommitBlockListOptions.Tier))
+ if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Tier != nil {
+ req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientCommitBlockListOptions.Tier))
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
@@ -121,41 +140,50 @@ func (client *blockBlobClient) commitBlockListCreateRequest(ctx context.Context,
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blockBlobCommitBlockListOptions != nil && blockBlobCommitBlockListOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blockBlobCommitBlockListOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientCommitBlockListOptions.RequestID)
+ }
+ if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.BlobTagsString != nil {
+ req.Raw().Header.Set("x-ms-tags", *blockBlobClientCommitBlockListOptions.BlobTagsString)
+ }
+ if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.ImmutabilityPolicyExpiry != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-until-date", blockBlobClientCommitBlockListOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123))
+ }
+ if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.ImmutabilityPolicyMode != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blockBlobClientCommitBlockListOptions.ImmutabilityPolicyMode))
}
- if blockBlobCommitBlockListOptions != nil && blockBlobCommitBlockListOptions.BlobTagsString != nil {
- req.Raw().Header.Set("x-ms-tags", *blockBlobCommitBlockListOptions.BlobTagsString)
+ if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.LegalHold != nil {
+ req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blockBlobClientCommitBlockListOptions.LegalHold))
}
req.Raw().Header.Set("Accept", "application/xml")
return req, runtime.MarshalAsXML(req, blocks)
}
// commitBlockListHandleResponse handles the CommitBlockList response.
-func (client *blockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobCommitBlockListResponse, error) {
- result := BlockBlobCommitBlockListResponse{RawResponse: resp}
+func (client *blockBlobClient) commitBlockListHandleResponse(resp *http.Response) (blockBlobClientCommitBlockListResponse, error) {
+ result := blockBlobClientCommitBlockListResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlockBlobCommitBlockListResponse{}, err
+ return blockBlobClientCommitBlockListResponse{}, err
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlockBlobCommitBlockListResponse{}, err
+ return blockBlobClientCommitBlockListResponse{}, err
}
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlockBlobCommitBlockListResponse{}, err
+ return blockBlobClientCommitBlockListResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
}
@@ -174,14 +202,14 @@ func (client *blockBlobClient) commitBlockListHandleResponse(resp *http.Response
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlockBlobCommitBlockListResponse{}, err
+ return blockBlobClientCommitBlockListResponse{}, err
}
result.Date = &date
}
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return BlockBlobCommitBlockListResponse{}, err
+ return blockBlobClientCommitBlockListResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
@@ -195,36 +223,41 @@ func (client *blockBlobClient) commitBlockListHandleResponse(resp *http.Response
}
// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob
-// If the operation fails it returns the *StorageError error type.
-func (client *blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, blockBlobGetBlockListOptions *BlockBlobGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobGetBlockListResponse, error) {
- req, err := client.getBlockListCreateRequest(ctx, listType, blockBlobGetBlockListOptions, leaseAccessConditions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together.
+// blockBlobClientGetBlockListOptions - blockBlobClientGetBlockListOptions contains the optional parameters for the blockBlobClient.GetBlockList
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, blockBlobClientGetBlockListOptions *blockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientGetBlockListResponse, error) {
+ req, err := client.getBlockListCreateRequest(ctx, listType, blockBlobClientGetBlockListOptions, leaseAccessConditions, modifiedAccessConditions)
if err != nil {
- return BlockBlobGetBlockListResponse{}, err
+ return blockBlobClientGetBlockListResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlockBlobGetBlockListResponse{}, err
+ return blockBlobClientGetBlockListResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return BlockBlobGetBlockListResponse{}, runtime.NewResponseError(resp)
+ return blockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp)
}
return client.getBlockListHandleResponse(resp)
}
// getBlockListCreateRequest creates the GetBlockList request.
-func (client *blockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, blockBlobGetBlockListOptions *BlockBlobGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *blockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, blockBlobClientGetBlockListOptions *blockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "blocklist")
- if blockBlobGetBlockListOptions != nil && blockBlobGetBlockListOptions.Snapshot != nil {
- reqQP.Set("snapshot", *blockBlobGetBlockListOptions.Snapshot)
+ if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.Snapshot != nil {
+ reqQP.Set("snapshot", *blockBlobClientGetBlockListOptions.Snapshot)
}
reqQP.Set("blocklisttype", string(listType))
- if blockBlobGetBlockListOptions != nil && blockBlobGetBlockListOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobGetBlockListOptions.Timeout), 10))
+ if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientGetBlockListOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
@@ -233,21 +266,21 @@ func (client *blockBlobClient) getBlockListCreateRequest(ctx context.Context, li
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blockBlobGetBlockListOptions != nil && blockBlobGetBlockListOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blockBlobGetBlockListOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientGetBlockListOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// getBlockListHandleResponse handles the GetBlockList response.
-func (client *blockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobGetBlockListResponse, error) {
- result := BlockBlobGetBlockListResponse{RawResponse: resp}
+func (client *blockBlobClient) getBlockListHandleResponse(resp *http.Response) (blockBlobClientGetBlockListResponse, error) {
+ result := blockBlobClientGetBlockListResponse{RawResponse: resp}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlockBlobGetBlockListResponse{}, err
+ return blockBlobClientGetBlockListResponse{}, err
}
result.LastModified = &lastModified
}
@@ -260,7 +293,7 @@ func (client *blockBlobClient) getBlockListHandleResponse(resp *http.Response) (
if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
blobContentLength, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return BlockBlobGetBlockListResponse{}, err
+ return blockBlobClientGetBlockListResponse{}, err
}
result.BlobContentLength = &blobContentLength
}
@@ -276,52 +309,259 @@ func (client *blockBlobClient) getBlockListHandleResponse(resp *http.Response) (
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlockBlobGetBlockListResponse{}, err
+ return blockBlobClientGetBlockListResponse{}, err
}
result.Date = &date
}
if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil {
- return BlockBlobGetBlockListResponse{}, err
+ return blockBlobClientGetBlockListResponse{}, err
+ }
+ return result, nil
+}
+
+// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from
+// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not
+// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform
+// partial updates to a block blob’s contents using a source URL, use the Put
+// Block from URL API in conjunction with Put Block List.
+// If the operation fails it returns an *azcore.ResponseError type.
+// contentLength - The length of the request.
+// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
+// a page blob snapshot. The value should be URL-encoded as it would appear in a request
+// URI. The source blob must either be public or must be authenticated via a shared access signature.
+// blockBlobClientPutBlobFromURLOptions - blockBlobClientPutBlobFromURLOptions contains the optional parameters for the blockBlobClient.PutBlobFromURL
+// method.
+// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL
+// method.
+func (client *blockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, blockBlobClientPutBlobFromURLOptions *blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (blockBlobClientPutBlobFromURLResponse, error) {
+ req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions)
+ if err != nil {
+ return blockBlobClientPutBlobFromURLResponse{}, err
+ }
+ resp, err := client.pl.Do(req)
+ if err != nil {
+ return blockBlobClientPutBlobFromURLResponse{}, err
+ }
+ if !runtime.HasStatusCode(resp, http.StatusCreated) {
+ return blockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp)
+ }
+ return client.putBlobFromURLHandleResponse(resp)
+}
+
+// putBlobFromURLCreateRequest creates the PutBlobFromURL request.
+func (client *blockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, blockBlobClientPutBlobFromURLOptions *blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientPutBlobFromURLOptions.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header.Set("x-ms-blob-type", "BlockBlob")
+ if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.TransactionalContentMD5 != nil {
+ req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientPutBlobFromURLOptions.TransactionalContentMD5))
+ }
+ req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
+ req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType)
+ }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
+ req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding)
+ }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
+ req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage)
+ }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
+ req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5))
+ }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
+ req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl)
+ }
+ if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Metadata != nil {
+ for k, v := range blockBlobClientPutBlobFromURLOptions.Metadata {
+ req.Raw().Header.Set("x-ms-meta-"+k, v)
+ }
+ }
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
+ }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
+ req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition)
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
+ req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
+ req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
+ }
+ if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
+ req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
+ }
+ if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Tier != nil {
+ req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientPutBlobFromURLOptions.Tier))
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
+ }
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
+ req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123))
+ }
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
+ req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123))
+ }
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
+ req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch)
+ }
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
+ req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch)
+ }
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil {
+ req.Raw().Header.Set("x-ms-source-if-tags", *sourceModifiedAccessConditions.SourceIfTags)
+ }
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientPutBlobFromURLOptions.RequestID)
+ }
+ if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.SourceContentMD5 != nil {
+ req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(blockBlobClientPutBlobFromURLOptions.SourceContentMD5))
+ }
+ if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.BlobTagsString != nil {
+ req.Raw().Header.Set("x-ms-tags", *blockBlobClientPutBlobFromURLOptions.BlobTagsString)
+ }
+ req.Raw().Header.Set("x-ms-copy-source", copySource)
+ if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.CopySourceBlobProperties != nil {
+ req.Raw().Header.Set("x-ms-copy-source-blob-properties", strconv.FormatBool(*blockBlobClientPutBlobFromURLOptions.CopySourceBlobProperties))
+ }
+ if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.CopySourceAuthorization != nil {
+ req.Raw().Header.Set("x-ms-copy-source-authorization", *blockBlobClientPutBlobFromURLOptions.CopySourceAuthorization)
+ }
+ req.Raw().Header.Set("Accept", "application/xml")
+ return req, nil
+}
+
+// putBlobFromURLHandleResponse handles the PutBlobFromURL response.
+func (client *blockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (blockBlobClientPutBlobFromURLResponse, error) {
+ result := blockBlobClientPutBlobFromURLResponse{RawResponse: resp}
+ if val := resp.Header.Get("ETag"); val != "" {
+ result.ETag = &val
+ }
+ if val := resp.Header.Get("Last-Modified"); val != "" {
+ lastModified, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return blockBlobClientPutBlobFromURLResponse{}, err
+ }
+ result.LastModified = &lastModified
+ }
+ if val := resp.Header.Get("Content-MD5"); val != "" {
+ contentMD5, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ return blockBlobClientPutBlobFromURLResponse{}, err
+ }
+ result.ContentMD5 = contentMD5
+ }
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-request-id"); val != "" {
+ result.RequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-version"); val != "" {
+ result.Version = &val
+ }
+ if val := resp.Header.Get("x-ms-version-id"); val != "" {
+ result.VersionID = &val
+ }
+ if val := resp.Header.Get("Date"); val != "" {
+ date, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return blockBlobClientPutBlobFromURLResponse{}, err
+ }
+ result.Date = &date
+ }
+ if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+ isServerEncrypted, err := strconv.ParseBool(val)
+ if err != nil {
+ return blockBlobClientPutBlobFromURLResponse{}, err
+ }
+ result.IsServerEncrypted = &isServerEncrypted
+ }
+ if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+ result.EncryptionKeySHA256 = &val
+ }
+ if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+ result.EncryptionScope = &val
}
return result, nil
}
// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob
-// If the operation fails it returns the *StorageError error type.
-func (client *blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, blockBlobStageBlockOptions *BlockBlobStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (BlockBlobStageBlockResponse, error) {
- req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, blockBlobStageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo)
+// If the operation fails it returns an *azcore.ResponseError type.
+// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
+// to 64 bytes in size. For a given blob, the length of the value specified for the blockid
+// parameter must be the same size for each block.
+// contentLength - The length of the request.
+// body - Initial data
+// blockBlobClientStageBlockOptions - blockBlobClientStageBlockOptions contains the optional parameters for the blockBlobClient.StageBlock
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+func (client *blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, blockBlobClientStageBlockOptions *blockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (blockBlobClientStageBlockResponse, error) {
+ req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, blockBlobClientStageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo)
if err != nil {
- return BlockBlobStageBlockResponse{}, err
+ return blockBlobClientStageBlockResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlockBlobStageBlockResponse{}, err
+ return blockBlobClientStageBlockResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return BlockBlobStageBlockResponse{}, runtime.NewResponseError(resp)
+ return blockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp)
}
return client.stageBlockHandleResponse(resp)
}
// stageBlockCreateRequest creates the StageBlock request.
-func (client *blockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, blockBlobStageBlockOptions *BlockBlobStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, blockBlobClientStageBlockOptions *blockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "block")
reqQP.Set("blockid", blockID)
- if blockBlobStageBlockOptions != nil && blockBlobStageBlockOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobStageBlockOptions.Timeout), 10))
+ if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientStageBlockOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
- if blockBlobStageBlockOptions != nil && blockBlobStageBlockOptions.TransactionalContentMD5 != nil {
- req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobStageBlockOptions.TransactionalContentMD5))
+ if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.TransactionalContentMD5 != nil {
+ req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockOptions.TransactionalContentMD5))
}
- if blockBlobStageBlockOptions != nil && blockBlobStageBlockOptions.TransactionalContentCRC64 != nil {
- req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blockBlobStageBlockOptions.TransactionalContentCRC64))
+ if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.TransactionalContentCRC64 != nil {
+ req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockOptions.TransactionalContentCRC64))
}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
@@ -333,26 +573,26 @@ func (client *blockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blockBlobStageBlockOptions != nil && blockBlobStageBlockOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blockBlobStageBlockOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientStageBlockOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, req.SetBody(body, "application/octet-stream")
}
// stageBlockHandleResponse handles the StageBlock response.
-func (client *blockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobStageBlockResponse, error) {
- result := BlockBlobStageBlockResponse{RawResponse: resp}
+func (client *blockBlobClient) stageBlockHandleResponse(resp *http.Response) (blockBlobClientStageBlockResponse, error) {
+ result := blockBlobClientStageBlockResponse{RawResponse: resp}
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlockBlobStageBlockResponse{}, err
+ return blockBlobClientStageBlockResponse{}, err
}
result.ContentMD5 = contentMD5
}
@@ -368,21 +608,21 @@ func (client *blockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlockBlobStageBlockResponse{}, err
+ return blockBlobClientStageBlockResponse{}, err
}
result.Date = &date
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlockBlobStageBlockResponse{}, err
+ return blockBlobClientStageBlockResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
}
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return BlockBlobStageBlockResponse{}, err
+ return blockBlobClientStageBlockResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
@@ -395,46 +635,59 @@ func (client *blockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
return result, nil
}
-// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents are read from a URL.
-// If the operation fails it returns the *StorageError error type.
-func (client *blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, blockBlobStageBlockFromURLOptions *BlockBlobStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobStageBlockFromURLResponse, error) {
- req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, blockBlobStageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
+// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents
+// are read from a URL.
+// If the operation fails it returns an *azcore.ResponseError type.
+// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
+// to 64 bytes in size. For a given blob, the length of the value specified for the blockid
+// parameter must be the same size for each block.
+// contentLength - The length of the request.
+// sourceURL - Specify a URL to the copy source.
+// blockBlobClientStageBlockFromURLOptions - blockBlobClientStageBlockFromURLOptions contains the optional parameters for
+// the blockBlobClient.StageBlockFromURL method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL
+// method.
+func (client *blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, blockBlobClientStageBlockFromURLOptions *blockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (blockBlobClientStageBlockFromURLResponse, error) {
+ req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, blockBlobClientStageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
if err != nil {
- return BlockBlobStageBlockFromURLResponse{}, err
+ return blockBlobClientStageBlockFromURLResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlockBlobStageBlockFromURLResponse{}, err
+ return blockBlobClientStageBlockFromURLResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return BlockBlobStageBlockFromURLResponse{}, runtime.NewResponseError(resp)
+ return blockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp)
}
return client.stageBlockFromURLHandleResponse(resp)
}
// stageBlockFromURLCreateRequest creates the StageBlockFromURL request.
-func (client *blockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, blockBlobStageBlockFromURLOptions *BlockBlobStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, blockBlobClientStageBlockFromURLOptions *blockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "block")
reqQP.Set("blockid", blockID)
- if blockBlobStageBlockFromURLOptions != nil && blockBlobStageBlockFromURLOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobStageBlockFromURLOptions.Timeout), 10))
+ if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientStageBlockFromURLOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
req.Raw().Header.Set("x-ms-copy-source", sourceURL)
- if blockBlobStageBlockFromURLOptions != nil && blockBlobStageBlockFromURLOptions.SourceRange != nil {
- req.Raw().Header.Set("x-ms-source-range", *blockBlobStageBlockFromURLOptions.SourceRange)
+ if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceRange != nil {
+ req.Raw().Header.Set("x-ms-source-range", *blockBlobClientStageBlockFromURLOptions.SourceRange)
}
- if blockBlobStageBlockFromURLOptions != nil && blockBlobStageBlockFromURLOptions.SourceContentMD5 != nil {
- req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(blockBlobStageBlockFromURLOptions.SourceContentMD5))
+ if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceContentMD5 != nil {
+ req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockFromURLOptions.SourceContentMD5))
}
- if blockBlobStageBlockFromURLOptions != nil && blockBlobStageBlockFromURLOptions.SourceContentcrc64 != nil {
- req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(blockBlobStageBlockFromURLOptions.SourceContentcrc64))
+ if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceContentcrc64 != nil {
+ req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockFromURLOptions.SourceContentcrc64))
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey)
@@ -443,7 +696,7 @@ func (client *blockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
@@ -463,28 +716,31 @@ func (client *blockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blockBlobStageBlockFromURLOptions != nil && blockBlobStageBlockFromURLOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blockBlobStageBlockFromURLOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientStageBlockFromURLOptions.RequestID)
+ }
+ if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.CopySourceAuthorization != nil {
+ req.Raw().Header.Set("x-ms-copy-source-authorization", *blockBlobClientStageBlockFromURLOptions.CopySourceAuthorization)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// stageBlockFromURLHandleResponse handles the StageBlockFromURL response.
-func (client *blockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobStageBlockFromURLResponse, error) {
- result := BlockBlobStageBlockFromURLResponse{RawResponse: resp}
+func (client *blockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (blockBlobClientStageBlockFromURLResponse, error) {
+ result := blockBlobClientStageBlockFromURLResponse{RawResponse: resp}
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlockBlobStageBlockFromURLResponse{}, err
+ return blockBlobClientStageBlockFromURLResponse{}, err
}
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlockBlobStageBlockFromURLResponse{}, err
+ return blockBlobClientStageBlockFromURLResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
}
@@ -500,14 +756,14 @@ func (client *blockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlockBlobStageBlockFromURLResponse{}, err
+ return blockBlobClientStageBlockFromURLResponse{}, err
}
result.Date = &date
}
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return BlockBlobStageBlockFromURLResponse{}, err
+ return blockBlobClientStageBlockFromURLResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
@@ -520,40 +776,49 @@ func (client *blockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
return result, nil
}
-// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob overwrites any existing metadata
-// on the blob. Partial updates are not supported with Put
-// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a block blob, use
-// the Put Block List operation.
-// If the operation fails it returns the *StorageError error type.
-func (client *blockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, blockBlobUploadOptions *BlockBlobUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobUploadResponse, error) {
- req, err := client.uploadCreateRequest(ctx, contentLength, body, blockBlobUploadOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob
+// overwrites any existing metadata on the blob. Partial updates are not supported with Put
+// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of
+// the content of a block blob, use the Put Block List operation.
+// If the operation fails it returns an *azcore.ResponseError type.
+// contentLength - The length of the request.
+// body - Initial data
+// blockBlobClientUploadOptions - blockBlobClientUploadOptions contains the optional parameters for the blockBlobClient.Upload
+// method.
+// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *blockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, blockBlobClientUploadOptions *blockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientUploadResponse, error) {
+ req, err := client.uploadCreateRequest(ctx, contentLength, body, blockBlobClientUploadOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
if err != nil {
- return BlockBlobUploadResponse{}, err
+ return blockBlobClientUploadResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return BlockBlobUploadResponse{}, err
+ return blockBlobClientUploadResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return BlockBlobUploadResponse{}, runtime.NewResponseError(resp)
+ return blockBlobClientUploadResponse{}, runtime.NewResponseError(resp)
}
return client.uploadHandleResponse(resp)
}
// uploadCreateRequest creates the Upload request.
-func (client *blockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, blockBlobUploadOptions *BlockBlobUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *blockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, blockBlobClientUploadOptions *blockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- if blockBlobUploadOptions != nil && blockBlobUploadOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobUploadOptions.Timeout), 10))
+ if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientUploadOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-blob-type", "BlockBlob")
- if blockBlobUploadOptions != nil && blockBlobUploadOptions.TransactionalContentMD5 != nil {
- req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobUploadOptions.TransactionalContentMD5))
+ if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.TransactionalContentMD5 != nil {
+ req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientUploadOptions.TransactionalContentMD5))
}
req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
@@ -571,8 +836,8 @@ func (client *blockBlobClient) uploadCreateRequest(ctx context.Context, contentL
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl)
}
- if blockBlobUploadOptions != nil && blockBlobUploadOptions.Metadata != nil {
- for k, v := range blockBlobUploadOptions.Metadata {
+ if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Metadata != nil {
+ for k, v := range blockBlobClientUploadOptions.Metadata {
req.Raw().Header.Set("x-ms-meta-"+k, v)
}
}
@@ -589,13 +854,13 @@ func (client *blockBlobClient) uploadCreateRequest(ctx context.Context, contentL
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
}
- if blockBlobUploadOptions != nil && blockBlobUploadOptions.Tier != nil {
- req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobUploadOptions.Tier))
+ if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Tier != nil {
+ req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientUploadOptions.Tier))
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
@@ -612,34 +877,43 @@ func (client *blockBlobClient) uploadCreateRequest(ctx context.Context, contentL
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if blockBlobUploadOptions != nil && blockBlobUploadOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *blockBlobUploadOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientUploadOptions.RequestID)
+ }
+ if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.BlobTagsString != nil {
+ req.Raw().Header.Set("x-ms-tags", *blockBlobClientUploadOptions.BlobTagsString)
+ }
+ if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.ImmutabilityPolicyExpiry != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-until-date", blockBlobClientUploadOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123))
+ }
+ if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.ImmutabilityPolicyMode != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blockBlobClientUploadOptions.ImmutabilityPolicyMode))
}
- if blockBlobUploadOptions != nil && blockBlobUploadOptions.BlobTagsString != nil {
- req.Raw().Header.Set("x-ms-tags", *blockBlobUploadOptions.BlobTagsString)
+ if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.LegalHold != nil {
+ req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blockBlobClientUploadOptions.LegalHold))
}
req.Raw().Header.Set("Accept", "application/xml")
return req, req.SetBody(body, "application/octet-stream")
}
// uploadHandleResponse handles the Upload response.
-func (client *blockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobUploadResponse, error) {
- result := BlockBlobUploadResponse{RawResponse: resp}
+func (client *blockBlobClient) uploadHandleResponse(resp *http.Response) (blockBlobClientUploadResponse, error) {
+ result := blockBlobClientUploadResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlockBlobUploadResponse{}, err
+ return blockBlobClientUploadResponse{}, err
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return BlockBlobUploadResponse{}, err
+ return blockBlobClientUploadResponse{}, err
}
result.ContentMD5 = contentMD5
}
@@ -658,14 +932,14 @@ func (client *blockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return BlockBlobUploadResponse{}, err
+ return blockBlobClientUploadResponse{}, err
}
result.Date = &date
}
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return BlockBlobUploadResponse{}, err
+ return blockBlobClientUploadResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_connection.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_connection.go
deleted file mode 100644
index aa11dd8d7c7..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_connection.go
+++ /dev/null
@@ -1,65 +0,0 @@
-//go:build go1.16
-// +build go1.16
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-package azblob
-
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
-)
-
-// connectionOptions contains configuration settings for the connection's pipeline.
-// All zero-value fields will be initialized with their default values.
-type connectionOptions struct {
- // HTTPClient sets the transport for making HTTP requests.
- HTTPClient policy.Transporter
- // Retry configures the built-in retry policy behavior.
- Retry policy.RetryOptions
- // Telemetry configures the built-in telemetry policy behavior.
- Telemetry policy.TelemetryOptions
- // Logging configures the built-in logging policy behavior.
- Logging policy.LogOptions
- // PerCallPolicies contains custom policies to inject into the pipeline.
- // Each policy is executed once per request.
- PerCallPolicies []policy.Policy
- // PerRetryPolicies contains custom policies to inject into the pipeline.
- // Each policy is executed once per request, and for each retry request.
- PerRetryPolicies []policy.Policy
-}
-
-type connection struct {
- u string
- p runtime.Pipeline
-}
-
-// newConnection creates an instance of the connection type with the specified endpoint.
-// Pass nil to accept the default options; this is the same as passing a zero-value options.
-func newConnection(endpoint string, authPolicy policy.Policy, options *azcore.ClientOptions) *connection {
- cp := azcore.ClientOptions{}
- if options != nil {
- cp = *options
- }
- perRetryPolicies := []policy.Policy{}
- if authPolicy != nil {
- perRetryPolicies = append(perRetryPolicies, authPolicy)
- }
- return &connection{u: endpoint, p: runtime.NewPipeline(module, version, runtime.PipelineOptions{
- PerRetry: perRetryPolicies,
- }, &cp)}
-}
-
-// Endpoint returns the connection's endpoint.
-func (c *connection) Endpoint() string {
- return c.u
-}
-
-// Pipeline returns the connection's pipeline.
-func (c *connection) Pipeline() runtime.Pipeline {
- return c.p
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go
index e8bacdcc71c..2348df04a43 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
@@ -9,10 +9,11 @@
package azblob
const (
- module = "azblob"
- version = "v0.3.0"
+ moduleName = "azblob"
+ moduleVersion = "v0.4.1"
)
+// AccessTier enum
type AccessTier string
const (
@@ -57,6 +58,7 @@ func (c AccessTier) ToPtr() *AccessTier {
return &c
}
+// AccountKind enum
type AccountKind string
const (
@@ -83,6 +85,7 @@ func (c AccountKind) ToPtr() *AccountKind {
return &c
}
+// ArchiveStatus enum
type ArchiveStatus string
const (
@@ -103,6 +106,7 @@ func (c ArchiveStatus) ToPtr() *ArchiveStatus {
return &c
}
+// BlobExpiryOptions enum
type BlobExpiryOptions string
const (
@@ -127,6 +131,53 @@ func (c BlobExpiryOptions) ToPtr() *BlobExpiryOptions {
return &c
}
+// BlobGeoReplicationStatus - The status of the secondary location
+type BlobGeoReplicationStatus string
+
+const (
+ BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live"
+ BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = "bootstrap"
+ BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable"
+)
+
+// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type.
+func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus {
+ return []BlobGeoReplicationStatus{
+ BlobGeoReplicationStatusLive,
+ BlobGeoReplicationStatusBootstrap,
+ BlobGeoReplicationStatusUnavailable,
+ }
+}
+
+// ToPtr returns a *BlobGeoReplicationStatus pointing to the current value.
+func (c BlobGeoReplicationStatus) ToPtr() *BlobGeoReplicationStatus {
+ return &c
+}
+
+// BlobImmutabilityPolicyMode enum
+type BlobImmutabilityPolicyMode string
+
+const (
+ BlobImmutabilityPolicyModeMutable BlobImmutabilityPolicyMode = "Mutable"
+ BlobImmutabilityPolicyModeUnlocked BlobImmutabilityPolicyMode = "Unlocked"
+ BlobImmutabilityPolicyModeLocked BlobImmutabilityPolicyMode = "Locked"
+)
+
+// PossibleBlobImmutabilityPolicyModeValues returns the possible values for the BlobImmutabilityPolicyMode const type.
+func PossibleBlobImmutabilityPolicyModeValues() []BlobImmutabilityPolicyMode {
+ return []BlobImmutabilityPolicyMode{
+ BlobImmutabilityPolicyModeMutable,
+ BlobImmutabilityPolicyModeUnlocked,
+ BlobImmutabilityPolicyModeLocked,
+ }
+}
+
+// ToPtr returns a *BlobImmutabilityPolicyMode pointing to the current value.
+func (c BlobImmutabilityPolicyMode) ToPtr() *BlobImmutabilityPolicyMode {
+ return &c
+}
+
+// BlobType enum
type BlobType string
const (
@@ -149,6 +200,7 @@ func (c BlobType) ToPtr() *BlobType {
return &c
}
+// BlockListType enum
type BlockListType string
const (
@@ -171,6 +223,7 @@ func (c BlockListType) ToPtr() *BlockListType {
return &c
}
+// CopyStatusType enum
type CopyStatusType string
const (
@@ -195,6 +248,7 @@ func (c CopyStatusType) ToPtr() *CopyStatusType {
return &c
}
+// DeleteSnapshotsOptionType enum
type DeleteSnapshotsOptionType string
const (
@@ -215,29 +269,28 @@ func (c DeleteSnapshotsOptionType) ToPtr() *DeleteSnapshotsOptionType {
return &c
}
-// GeoReplicationStatusType - The status of the secondary location
-type GeoReplicationStatusType string
+// EncryptionAlgorithmType enum
+type EncryptionAlgorithmType string
const (
- GeoReplicationStatusTypeBootstrap GeoReplicationStatusType = "bootstrap"
- GeoReplicationStatusTypeLive GeoReplicationStatusType = "live"
- GeoReplicationStatusTypeUnavailable GeoReplicationStatusType = "unavailable"
+ EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None"
+ EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256"
)
-// PossibleGeoReplicationStatusTypeValues returns the possible values for the GeoReplicationStatusType const type.
-func PossibleGeoReplicationStatusTypeValues() []GeoReplicationStatusType {
- return []GeoReplicationStatusType{
- GeoReplicationStatusTypeBootstrap,
- GeoReplicationStatusTypeLive,
- GeoReplicationStatusTypeUnavailable,
+// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type.
+func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType {
+ return []EncryptionAlgorithmType{
+ EncryptionAlgorithmTypeNone,
+ EncryptionAlgorithmTypeAES256,
}
}
-// ToPtr returns a *GeoReplicationStatusType pointing to the current value.
-func (c GeoReplicationStatusType) ToPtr() *GeoReplicationStatusType {
+// ToPtr returns a *EncryptionAlgorithmType pointing to the current value.
+func (c EncryptionAlgorithmType) ToPtr() *EncryptionAlgorithmType {
return &c
}
+// LeaseDurationType enum
type LeaseDurationType string
const (
@@ -258,6 +311,7 @@ func (c LeaseDurationType) ToPtr() *LeaseDurationType {
return &c
}
+// LeaseStateType enum
type LeaseStateType string
const (
@@ -284,6 +338,7 @@ func (c LeaseStateType) ToPtr() *LeaseStateType {
return &c
}
+// LeaseStatusType enum
type LeaseStatusType string
const (
@@ -304,16 +359,20 @@ func (c LeaseStatusType) ToPtr() *LeaseStatusType {
return &c
}
+// ListBlobsIncludeItem enum
type ListBlobsIncludeItem string
const (
- ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy"
- ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted"
- ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata"
- ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots"
- ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs"
- ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions"
- ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags"
+ ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy"
+ ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted"
+ ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata"
+ ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots"
+ ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs"
+ ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions"
+ ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags"
+ ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy"
+ ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold"
+ ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions"
)
// PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type.
@@ -326,6 +385,9 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem {
ListBlobsIncludeItemUncommittedblobs,
ListBlobsIncludeItemVersions,
ListBlobsIncludeItemTags,
+ ListBlobsIncludeItemImmutabilitypolicy,
+ ListBlobsIncludeItemLegalhold,
+ ListBlobsIncludeItemDeletedwithversions,
}
}
@@ -334,11 +396,13 @@ func (c ListBlobsIncludeItem) ToPtr() *ListBlobsIncludeItem {
return &c
}
+// ListContainersIncludeType enum
type ListContainersIncludeType string
const (
ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata"
ListContainersIncludeTypeDeleted ListContainersIncludeType = "deleted"
+ ListContainersIncludeTypeSystem ListContainersIncludeType = "system"
)
// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type.
@@ -346,6 +410,7 @@ func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType {
return []ListContainersIncludeType{
ListContainersIncludeTypeMetadata,
ListContainersIncludeTypeDeleted,
+ ListContainersIncludeTypeSystem,
}
}
@@ -354,26 +419,7 @@ func (c ListContainersIncludeType) ToPtr() *ListContainersIncludeType {
return &c
}
-type PathRenameMode string
-
-const (
- PathRenameModeLegacy PathRenameMode = "legacy"
- PathRenameModePosix PathRenameMode = "posix"
-)
-
-// PossiblePathRenameModeValues returns the possible values for the PathRenameMode const type.
-func PossiblePathRenameModeValues() []PathRenameMode {
- return []PathRenameMode{
- PathRenameModeLegacy,
- PathRenameModePosix,
- }
-}
-
-// ToPtr returns a *PathRenameMode pointing to the current value.
-func (c PathRenameMode) ToPtr() *PathRenameMode {
- return &c
-}
-
+// PremiumPageBlobAccessTier enum
type PremiumPageBlobAccessTier string
const (
@@ -412,6 +458,7 @@ func (c PremiumPageBlobAccessTier) ToPtr() *PremiumPageBlobAccessTier {
return &c
}
+// PublicAccessType enum
type PublicAccessType string
const (
@@ -438,6 +485,8 @@ type QueryFormatType string
const (
QueryFormatTypeDelimited QueryFormatType = "delimited"
QueryFormatTypeJSON QueryFormatType = "json"
+ QueryFormatTypeArrow QueryFormatType = "arrow"
+ QueryFormatTypeParquet QueryFormatType = "parquet"
)
// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type.
@@ -445,6 +494,8 @@ func PossibleQueryFormatTypeValues() []QueryFormatType {
return []QueryFormatType{
QueryFormatTypeDelimited,
QueryFormatTypeJSON,
+ QueryFormatTypeArrow,
+ QueryFormatTypeParquet,
}
}
@@ -453,7 +504,8 @@ func (c QueryFormatType) ToPtr() *QueryFormatType {
return &c
}
-// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High and Standard.
+// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate.
+// Valid values are High and Standard.
type RehydratePriority string
const (
@@ -474,6 +526,7 @@ func (c RehydratePriority) ToPtr() *RehydratePriority {
return &c
}
+// SKUName enum
type SKUName string
const (
@@ -500,6 +553,7 @@ func (c SKUName) ToPtr() *SKUName {
return &c
}
+// SequenceNumberActionType enum
type SequenceNumberActionType string
const (
@@ -628,8 +682,8 @@ const (
StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCode = "SequenceNumberConditionNotMet"
StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCode = "SequenceNumberIncrementTooLarge"
StorageErrorCodeServerBusy StorageErrorCode = "ServerBusy"
- StorageErrorCodeSnaphotOperationRateExceeded StorageErrorCode = "SnaphotOperationRateExceeded"
StorageErrorCodeSnapshotCountExceeded StorageErrorCode = "SnapshotCountExceeded"
+ StorageErrorCodeSnapshotOperationRateExceeded StorageErrorCode = "SnapshotOperationRateExceeded"
StorageErrorCodeSnapshotsPresent StorageErrorCode = "SnapshotsPresent"
StorageErrorCodeSourceConditionNotMet StorageErrorCode = "SourceConditionNotMet"
StorageErrorCodeSystemInUse StorageErrorCode = "SystemInUse"
@@ -746,8 +800,8 @@ func PossibleStorageErrorCodeValues() []StorageErrorCode {
StorageErrorCodeSequenceNumberConditionNotMet,
StorageErrorCodeSequenceNumberIncrementTooLarge,
StorageErrorCodeServerBusy,
- StorageErrorCodeSnaphotOperationRateExceeded,
StorageErrorCodeSnapshotCountExceeded,
+ StorageErrorCodeSnapshotOperationRateExceeded,
StorageErrorCodeSnapshotsPresent,
StorageErrorCodeSourceConditionNotMet,
StorageErrorCodeSystemInUse,
@@ -764,3 +818,24 @@ func PossibleStorageErrorCodeValues() []StorageErrorCode {
func (c StorageErrorCode) ToPtr() *StorageErrorCode {
return &c
}
+
+// BlobDeleteType enum
+type BlobDeleteType string
+
+const (
+ BlobDeleteTypeNone BlobDeleteType = "None"
+ BlobDeleteTypePermanent BlobDeleteType = "Permanent"
+)
+
+// PossibleBlobDeleteTypeValues returns the possible values for the BlobDeleteType const type.
+func PossibleBlobDeleteTypeValues() []BlobDeleteType {
+ return []BlobDeleteType{
+ BlobDeleteTypeNone,
+ BlobDeleteTypePermanent,
+ }
+}
+
+// ToPtr returns a *BlobDeleteType pointing to the current value.
+func (c BlobDeleteType) ToPtr() *BlobDeleteType {
+ return &c
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go
index 81295fe7d61..c9245ce10d4 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
@@ -12,55 +12,71 @@ import (
"context"
"encoding/xml"
"fmt"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "io"
"net/http"
"strconv"
"strings"
"time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
type containerClient struct {
- con *connection
+ endpoint string
+ pl runtime.Pipeline
}
-// AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) AcquireLease(ctx context.Context, containerAcquireLeaseOptions *ContainerAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerAcquireLeaseResponse, error) {
- req, err := client.acquireLeaseCreateRequest(ctx, containerAcquireLeaseOptions, modifiedAccessConditions)
+// newContainerClient creates a new instance of containerClient with the specified values.
+// endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
+// pl - the pipeline used for sending requests and handling responses.
+func newContainerClient(endpoint string, pl runtime.Pipeline) *containerClient {
+ client := &containerClient{
+ endpoint: endpoint,
+ pl: pl,
+ }
+ return client
+}
+
+// AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15
+// to 60 seconds, or can be infinite
+// If the operation fails it returns an *azcore.ResponseError type.
+// containerClientAcquireLeaseOptions - containerClientAcquireLeaseOptions contains the optional parameters for the containerClient.AcquireLease
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *containerClient) AcquireLease(ctx context.Context, containerClientAcquireLeaseOptions *containerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientAcquireLeaseResponse, error) {
+ req, err := client.acquireLeaseCreateRequest(ctx, containerClientAcquireLeaseOptions, modifiedAccessConditions)
if err != nil {
- return ContainerAcquireLeaseResponse{}, err
+ return containerClientAcquireLeaseResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerAcquireLeaseResponse{}, err
+ return containerClientAcquireLeaseResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return ContainerAcquireLeaseResponse{}, runtime.NewResponseError(resp)
+ return containerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp)
}
return client.acquireLeaseHandleResponse(resp)
}
// acquireLeaseCreateRequest creates the AcquireLease request.
-func (client *containerClient) acquireLeaseCreateRequest(ctx context.Context, containerAcquireLeaseOptions *ContainerAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *containerClient) acquireLeaseCreateRequest(ctx context.Context, containerClientAcquireLeaseOptions *containerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "lease")
reqQP.Set("restype", "container")
- if containerAcquireLeaseOptions != nil && containerAcquireLeaseOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*containerAcquireLeaseOptions.Timeout), 10))
+ if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientAcquireLeaseOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-lease-action", "acquire")
- if containerAcquireLeaseOptions != nil && containerAcquireLeaseOptions.Duration != nil {
- req.Raw().Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*containerAcquireLeaseOptions.Duration), 10))
+ if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.Duration != nil {
+ req.Raw().Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*containerClientAcquireLeaseOptions.Duration), 10))
}
- if containerAcquireLeaseOptions != nil && containerAcquireLeaseOptions.ProposedLeaseID != nil {
- req.Raw().Header.Set("x-ms-proposed-lease-id", *containerAcquireLeaseOptions.ProposedLeaseID)
+ if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.ProposedLeaseID != nil {
+ req.Raw().Header.Set("x-ms-proposed-lease-id", *containerClientAcquireLeaseOptions.ProposedLeaseID)
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
@@ -68,24 +84,24 @@ func (client *containerClient) acquireLeaseCreateRequest(ctx context.Context, co
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if containerAcquireLeaseOptions != nil && containerAcquireLeaseOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *containerAcquireLeaseOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *containerClientAcquireLeaseOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// acquireLeaseHandleResponse handles the AcquireLease response.
-func (client *containerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerAcquireLeaseResponse, error) {
- result := ContainerAcquireLeaseResponse{RawResponse: resp}
+func (client *containerClient) acquireLeaseHandleResponse(resp *http.Response) (containerClientAcquireLeaseResponse, error) {
+ result := containerClientAcquireLeaseResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerAcquireLeaseResponse{}, err
+ return containerClientAcquireLeaseResponse{}, err
}
result.LastModified = &lastModified
}
@@ -104,46 +120,50 @@ func (client *containerClient) acquireLeaseHandleResponse(resp *http.Response) (
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerAcquireLeaseResponse{}, err
+ return containerClientAcquireLeaseResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) BreakLease(ctx context.Context, containerBreakLeaseOptions *ContainerBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerBreakLeaseResponse, error) {
- req, err := client.breakLeaseCreateRequest(ctx, containerBreakLeaseOptions, modifiedAccessConditions)
+// BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15
+// to 60 seconds, or can be infinite
+// If the operation fails it returns an *azcore.ResponseError type.
+// containerClientBreakLeaseOptions - containerClientBreakLeaseOptions contains the optional parameters for the containerClient.BreakLease
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *containerClient) BreakLease(ctx context.Context, containerClientBreakLeaseOptions *containerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientBreakLeaseResponse, error) {
+ req, err := client.breakLeaseCreateRequest(ctx, containerClientBreakLeaseOptions, modifiedAccessConditions)
if err != nil {
- return ContainerBreakLeaseResponse{}, err
+ return containerClientBreakLeaseResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerBreakLeaseResponse{}, err
+ return containerClientBreakLeaseResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusAccepted) {
- return ContainerBreakLeaseResponse{}, runtime.NewResponseError(resp)
+ return containerClientBreakLeaseResponse{}, runtime.NewResponseError(resp)
}
return client.breakLeaseHandleResponse(resp)
}
// breakLeaseCreateRequest creates the BreakLease request.
-func (client *containerClient) breakLeaseCreateRequest(ctx context.Context, containerBreakLeaseOptions *ContainerBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *containerClient) breakLeaseCreateRequest(ctx context.Context, containerClientBreakLeaseOptions *containerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "lease")
reqQP.Set("restype", "container")
- if containerBreakLeaseOptions != nil && containerBreakLeaseOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*containerBreakLeaseOptions.Timeout), 10))
+ if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientBreakLeaseOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-lease-action", "break")
- if containerBreakLeaseOptions != nil && containerBreakLeaseOptions.BreakPeriod != nil {
- req.Raw().Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*containerBreakLeaseOptions.BreakPeriod), 10))
+ if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.BreakPeriod != nil {
+ req.Raw().Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*containerClientBreakLeaseOptions.BreakPeriod), 10))
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
@@ -151,24 +171,24 @@ func (client *containerClient) breakLeaseCreateRequest(ctx context.Context, cont
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if containerBreakLeaseOptions != nil && containerBreakLeaseOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *containerBreakLeaseOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *containerClientBreakLeaseOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// breakLeaseHandleResponse handles the BreakLease response.
-func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerBreakLeaseResponse, error) {
- result := ContainerBreakLeaseResponse{RawResponse: resp}
+func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (containerClientBreakLeaseResponse, error) {
+ result := containerClientBreakLeaseResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerBreakLeaseResponse{}, err
+ return containerClientBreakLeaseResponse{}, err
}
result.LastModified = &lastModified
}
@@ -176,7 +196,7 @@ func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (Co
leaseTime32, err := strconv.ParseInt(val, 10, 32)
leaseTime := int32(leaseTime32)
if err != nil {
- return ContainerBreakLeaseResponse{}, err
+ return containerClientBreakLeaseResponse{}, err
}
result.LeaseTime = &leaseTime
}
@@ -192,41 +212,49 @@ func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (Co
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerBreakLeaseResponse{}, err
+ return containerClientBreakLeaseResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, containerChangeLeaseOptions *ContainerChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerChangeLeaseResponse, error) {
- req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, containerChangeLeaseOptions, modifiedAccessConditions)
+// ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15
+// to 60 seconds, or can be infinite
+// If the operation fails it returns an *azcore.ResponseError type.
+// leaseID - Specifies the current lease ID on the resource.
+// proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed
+// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID
+// string formats.
+// containerClientChangeLeaseOptions - containerClientChangeLeaseOptions contains the optional parameters for the containerClient.ChangeLease
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *containerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, containerClientChangeLeaseOptions *containerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientChangeLeaseResponse, error) {
+ req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, containerClientChangeLeaseOptions, modifiedAccessConditions)
if err != nil {
- return ContainerChangeLeaseResponse{}, err
+ return containerClientChangeLeaseResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerChangeLeaseResponse{}, err
+ return containerClientChangeLeaseResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ContainerChangeLeaseResponse{}, runtime.NewResponseError(resp)
+ return containerClientChangeLeaseResponse{}, runtime.NewResponseError(resp)
}
return client.changeLeaseHandleResponse(resp)
}
// changeLeaseCreateRequest creates the ChangeLease request.
-func (client *containerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, containerChangeLeaseOptions *ContainerChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *containerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, containerClientChangeLeaseOptions *containerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "lease")
reqQP.Set("restype", "container")
- if containerChangeLeaseOptions != nil && containerChangeLeaseOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*containerChangeLeaseOptions.Timeout), 10))
+ if containerClientChangeLeaseOptions != nil && containerClientChangeLeaseOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientChangeLeaseOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-lease-action", "change")
@@ -238,24 +266,24 @@ func (client *containerClient) changeLeaseCreateRequest(ctx context.Context, lea
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if containerChangeLeaseOptions != nil && containerChangeLeaseOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *containerChangeLeaseOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if containerClientChangeLeaseOptions != nil && containerClientChangeLeaseOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *containerClientChangeLeaseOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// changeLeaseHandleResponse handles the ChangeLease response.
-func (client *containerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerChangeLeaseResponse, error) {
- result := ContainerChangeLeaseResponse{RawResponse: resp}
+func (client *containerClient) changeLeaseHandleResponse(resp *http.Response) (containerClientChangeLeaseResponse, error) {
+ result := containerClientChangeLeaseResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerChangeLeaseResponse{}, err
+ return containerClientChangeLeaseResponse{}, err
}
result.LastModified = &lastModified
}
@@ -274,53 +302,57 @@ func (client *containerClient) changeLeaseHandleResponse(resp *http.Response) (C
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerChangeLeaseResponse{}, err
+ return containerClientChangeLeaseResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// Create - creates a new container under the specified account. If the container with the same name already exists, the operation fails
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) Create(ctx context.Context, containerCreateOptions *ContainerCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (ContainerCreateResponse, error) {
- req, err := client.createCreateRequest(ctx, containerCreateOptions, containerCpkScopeInfo)
+// Create - creates a new container under the specified account. If the container with the same name already exists, the operation
+// fails
+// If the operation fails it returns an *azcore.ResponseError type.
+// containerClientCreateOptions - containerClientCreateOptions contains the optional parameters for the containerClient.Create
+// method.
+// ContainerCpkScopeInfo - ContainerCpkScopeInfo contains a group of parameters for the containerClient.Create method.
+func (client *containerClient) Create(ctx context.Context, containerClientCreateOptions *containerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (containerClientCreateResponse, error) {
+ req, err := client.createCreateRequest(ctx, containerClientCreateOptions, containerCpkScopeInfo)
if err != nil {
- return ContainerCreateResponse{}, err
+ return containerClientCreateResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerCreateResponse{}, err
+ return containerClientCreateResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return ContainerCreateResponse{}, runtime.NewResponseError(resp)
+ return containerClientCreateResponse{}, runtime.NewResponseError(resp)
}
return client.createHandleResponse(resp)
}
// createCreateRequest creates the Create request.
-func (client *containerClient) createCreateRequest(ctx context.Context, containerCreateOptions *ContainerCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *containerClient) createCreateRequest(ctx context.Context, containerClientCreateOptions *containerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("restype", "container")
- if containerCreateOptions != nil && containerCreateOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*containerCreateOptions.Timeout), 10))
+ if containerClientCreateOptions != nil && containerClientCreateOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientCreateOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if containerCreateOptions != nil && containerCreateOptions.Metadata != nil {
- for k, v := range containerCreateOptions.Metadata {
+ if containerClientCreateOptions != nil && containerClientCreateOptions.Metadata != nil {
+ for k, v := range containerClientCreateOptions.Metadata {
req.Raw().Header.Set("x-ms-meta-"+k, v)
}
}
- if containerCreateOptions != nil && containerCreateOptions.Access != nil {
- req.Raw().Header.Set("x-ms-blob-public-access", string(*containerCreateOptions.Access))
+ if containerClientCreateOptions != nil && containerClientCreateOptions.Access != nil {
+ req.Raw().Header.Set("x-ms-blob-public-access", string(*containerClientCreateOptions.Access))
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if containerCreateOptions != nil && containerCreateOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *containerCreateOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if containerClientCreateOptions != nil && containerClientCreateOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *containerClientCreateOptions.RequestID)
}
if containerCpkScopeInfo != nil && containerCpkScopeInfo.DefaultEncryptionScope != nil {
req.Raw().Header.Set("x-ms-default-encryption-scope", *containerCpkScopeInfo.DefaultEncryptionScope)
@@ -333,15 +365,15 @@ func (client *containerClient) createCreateRequest(ctx context.Context, containe
}
// createHandleResponse handles the Create response.
-func (client *containerClient) createHandleResponse(resp *http.Response) (ContainerCreateResponse, error) {
- result := ContainerCreateResponse{RawResponse: resp}
+func (client *containerClient) createHandleResponse(resp *http.Response) (containerClientCreateResponse, error) {
+ result := containerClientCreateResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerCreateResponse{}, err
+ return containerClientCreateResponse{}, err
}
result.LastModified = &lastModified
}
@@ -357,40 +389,45 @@ func (client *containerClient) createHandleResponse(resp *http.Response) (Contai
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerCreateResponse{}, err
+ return containerClientCreateResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// Delete - operation marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) Delete(ctx context.Context, containerDeleteOptions *ContainerDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerDeleteResponse, error) {
- req, err := client.deleteCreateRequest(ctx, containerDeleteOptions, leaseAccessConditions, modifiedAccessConditions)
+// Delete - operation marks the specified container for deletion. The container and any blobs contained within it are later
+// deleted during garbage collection
+// If the operation fails it returns an *azcore.ResponseError type.
+// containerClientDeleteOptions - containerClientDeleteOptions contains the optional parameters for the containerClient.Delete
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *containerClient) Delete(ctx context.Context, containerClientDeleteOptions *containerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientDeleteResponse, error) {
+ req, err := client.deleteCreateRequest(ctx, containerClientDeleteOptions, leaseAccessConditions, modifiedAccessConditions)
if err != nil {
- return ContainerDeleteResponse{}, err
+ return containerClientDeleteResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerDeleteResponse{}, err
+ return containerClientDeleteResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusAccepted) {
- return ContainerDeleteResponse{}, runtime.NewResponseError(resp)
+ return containerClientDeleteResponse{}, runtime.NewResponseError(resp)
}
return client.deleteHandleResponse(resp)
}
// deleteCreateRequest creates the Delete request.
-func (client *containerClient) deleteCreateRequest(ctx context.Context, containerDeleteOptions *ContainerDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodDelete, client.con.Endpoint())
+func (client *containerClient) deleteCreateRequest(ctx context.Context, containerClientDeleteOptions *containerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("restype", "container")
- if containerDeleteOptions != nil && containerDeleteOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*containerDeleteOptions.Timeout), 10))
+ if containerClientDeleteOptions != nil && containerClientDeleteOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientDeleteOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
@@ -402,17 +439,17 @@ func (client *containerClient) deleteCreateRequest(ctx context.Context, containe
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if containerDeleteOptions != nil && containerDeleteOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *containerDeleteOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if containerClientDeleteOptions != nil && containerClientDeleteOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *containerClientDeleteOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// deleteHandleResponse handles the Delete response.
-func (client *containerClient) deleteHandleResponse(resp *http.Response) (ContainerDeleteResponse, error) {
- result := ContainerDeleteResponse{RawResponse: resp}
+func (client *containerClient) deleteHandleResponse(resp *http.Response) (containerClientDeleteResponse, error) {
+ result := containerClientDeleteResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -425,57 +462,61 @@ func (client *containerClient) deleteHandleResponse(resp *http.Response) (Contai
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerDeleteResponse{}, err
+ return containerClientDeleteResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may be accessed publicly.
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) GetAccessPolicy(ctx context.Context, containerGetAccessPolicyOptions *ContainerGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerGetAccessPolicyResponse, error) {
- req, err := client.getAccessPolicyCreateRequest(ctx, containerGetAccessPolicyOptions, leaseAccessConditions)
+// GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may
+// be accessed publicly.
+// If the operation fails it returns an *azcore.ResponseError type.
+// containerClientGetAccessPolicyOptions - containerClientGetAccessPolicyOptions contains the optional parameters for the
+// containerClient.GetAccessPolicy method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+func (client *containerClient) GetAccessPolicy(ctx context.Context, containerClientGetAccessPolicyOptions *containerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (containerClientGetAccessPolicyResponse, error) {
+ req, err := client.getAccessPolicyCreateRequest(ctx, containerClientGetAccessPolicyOptions, leaseAccessConditions)
if err != nil {
- return ContainerGetAccessPolicyResponse{}, err
+ return containerClientGetAccessPolicyResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerGetAccessPolicyResponse{}, err
+ return containerClientGetAccessPolicyResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ContainerGetAccessPolicyResponse{}, runtime.NewResponseError(resp)
+ return containerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp)
}
return client.getAccessPolicyHandleResponse(resp)
}
// getAccessPolicyCreateRequest creates the GetAccessPolicy request.
-func (client *containerClient) getAccessPolicyCreateRequest(ctx context.Context, containerGetAccessPolicyOptions *ContainerGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *containerClient) getAccessPolicyCreateRequest(ctx context.Context, containerClientGetAccessPolicyOptions *containerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("restype", "container")
reqQP.Set("comp", "acl")
- if containerGetAccessPolicyOptions != nil && containerGetAccessPolicyOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*containerGetAccessPolicyOptions.Timeout), 10))
+ if containerClientGetAccessPolicyOptions != nil && containerClientGetAccessPolicyOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientGetAccessPolicyOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if containerGetAccessPolicyOptions != nil && containerGetAccessPolicyOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *containerGetAccessPolicyOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if containerClientGetAccessPolicyOptions != nil && containerClientGetAccessPolicyOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *containerClientGetAccessPolicyOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// getAccessPolicyHandleResponse handles the GetAccessPolicy response.
-func (client *containerClient) getAccessPolicyHandleResponse(resp *http.Response) (ContainerGetAccessPolicyResponse, error) {
- result := ContainerGetAccessPolicyResponse{RawResponse: resp}
+func (client *containerClient) getAccessPolicyHandleResponse(resp *http.Response) (containerClientGetAccessPolicyResponse, error) {
+ result := containerClientGetAccessPolicyResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-blob-public-access"); val != "" {
result.BlobPublicAccess = (*PublicAccessType)(&val)
}
@@ -485,7 +526,7 @@ func (client *containerClient) getAccessPolicyHandleResponse(resp *http.Response
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerGetAccessPolicyResponse{}, err
+ return containerClientGetAccessPolicyResponse{}, err
}
result.LastModified = &lastModified
}
@@ -501,36 +542,38 @@ func (client *containerClient) getAccessPolicyHandleResponse(resp *http.Response
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerGetAccessPolicyResponse{}, err
+ return containerClientGetAccessPolicyResponse{}, err
}
result.Date = &date
}
if err := runtime.UnmarshalAsXML(resp, &result); err != nil {
- return ContainerGetAccessPolicyResponse{}, err
+ return containerClientGetAccessPolicyResponse{}, err
}
return result, nil
}
// GetAccountInfo - Returns the sku name and account kind
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) GetAccountInfo(ctx context.Context, options *ContainerGetAccountInfoOptions) (ContainerGetAccountInfoResponse, error) {
+// If the operation fails it returns an *azcore.ResponseError type.
+// options - containerClientGetAccountInfoOptions contains the optional parameters for the containerClient.GetAccountInfo
+// method.
+func (client *containerClient) GetAccountInfo(ctx context.Context, options *containerClientGetAccountInfoOptions) (containerClientGetAccountInfoResponse, error) {
req, err := client.getAccountInfoCreateRequest(ctx, options)
if err != nil {
- return ContainerGetAccountInfoResponse{}, err
+ return containerClientGetAccountInfoResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerGetAccountInfoResponse{}, err
+ return containerClientGetAccountInfoResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ContainerGetAccountInfoResponse{}, runtime.NewResponseError(resp)
+ return containerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp)
}
return client.getAccountInfoHandleResponse(resp)
}
// getAccountInfoCreateRequest creates the GetAccountInfo request.
-func (client *containerClient) getAccountInfoCreateRequest(ctx context.Context, options *ContainerGetAccountInfoOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *containerClient) getAccountInfoCreateRequest(ctx context.Context, options *containerClientGetAccountInfoOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
@@ -538,14 +581,14 @@ func (client *containerClient) getAccountInfoCreateRequest(ctx context.Context,
reqQP.Set("restype", "account")
reqQP.Set("comp", "properties")
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// getAccountInfoHandleResponse handles the GetAccountInfo response.
-func (client *containerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerGetAccountInfoResponse, error) {
- result := ContainerGetAccountInfoResponse{RawResponse: resp}
+func (client *containerClient) getAccountInfoHandleResponse(resp *http.Response) (containerClientGetAccountInfoResponse, error) {
+ result := containerClientGetAccountInfoResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -558,7 +601,7 @@ func (client *containerClient) getAccountInfoHandleResponse(resp *http.Response)
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerGetAccountInfoResponse{}, err
+ return containerClientGetAccountInfoResponse{}, err
}
result.Date = &date
}
@@ -571,50 +614,53 @@ func (client *containerClient) getAccountInfoHandleResponse(resp *http.Response)
return result, nil
}
-// GetProperties - returns all user-defined metadata and system properties for the specified container. The data returned does not include the container's
-// list of blobs
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) GetProperties(ctx context.Context, containerGetPropertiesOptions *ContainerGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerGetPropertiesResponse, error) {
- req, err := client.getPropertiesCreateRequest(ctx, containerGetPropertiesOptions, leaseAccessConditions)
+// GetProperties - returns all user-defined metadata and system properties for the specified container. The data returned
+// does not include the container's list of blobs
+// If the operation fails it returns an *azcore.ResponseError type.
+// containerClientGetPropertiesOptions - containerClientGetPropertiesOptions contains the optional parameters for the containerClient.GetProperties
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+func (client *containerClient) GetProperties(ctx context.Context, containerClientGetPropertiesOptions *containerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (containerClientGetPropertiesResponse, error) {
+ req, err := client.getPropertiesCreateRequest(ctx, containerClientGetPropertiesOptions, leaseAccessConditions)
if err != nil {
- return ContainerGetPropertiesResponse{}, err
+ return containerClientGetPropertiesResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerGetPropertiesResponse{}, err
+ return containerClientGetPropertiesResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ContainerGetPropertiesResponse{}, runtime.NewResponseError(resp)
+ return containerClientGetPropertiesResponse{}, runtime.NewResponseError(resp)
}
return client.getPropertiesHandleResponse(resp)
}
// getPropertiesCreateRequest creates the GetProperties request.
-func (client *containerClient) getPropertiesCreateRequest(ctx context.Context, containerGetPropertiesOptions *ContainerGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *containerClient) getPropertiesCreateRequest(ctx context.Context, containerClientGetPropertiesOptions *containerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("restype", "container")
- if containerGetPropertiesOptions != nil && containerGetPropertiesOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*containerGetPropertiesOptions.Timeout), 10))
+ if containerClientGetPropertiesOptions != nil && containerClientGetPropertiesOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientGetPropertiesOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if containerGetPropertiesOptions != nil && containerGetPropertiesOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *containerGetPropertiesOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if containerClientGetPropertiesOptions != nil && containerClientGetPropertiesOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *containerClientGetPropertiesOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// getPropertiesHandleResponse handles the GetProperties response.
-func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerGetPropertiesResponse, error) {
- result := ContainerGetPropertiesResponse{RawResponse: resp}
+func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) (containerClientGetPropertiesResponse, error) {
+ result := containerClientGetPropertiesResponse{RawResponse: resp}
for hh := range resp.Header {
if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
if result.Metadata == nil {
@@ -629,7 +675,7 @@ func (client *containerClient) getPropertiesHandleResponse(resp *http.Response)
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerGetPropertiesResponse{}, err
+ return containerClientGetPropertiesResponse{}, err
}
result.LastModified = &lastModified
}
@@ -654,7 +700,7 @@ func (client *containerClient) getPropertiesHandleResponse(resp *http.Response)
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerGetPropertiesResponse{}, err
+ return containerClientGetPropertiesResponse{}, err
}
result.Date = &date
}
@@ -664,14 +710,14 @@ func (client *containerClient) getPropertiesHandleResponse(resp *http.Response)
if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" {
hasImmutabilityPolicy, err := strconv.ParseBool(val)
if err != nil {
- return ContainerGetPropertiesResponse{}, err
+ return containerClientGetPropertiesResponse{}, err
}
result.HasImmutabilityPolicy = &hasImmutabilityPolicy
}
if val := resp.Header.Get("x-ms-has-legal-hold"); val != "" {
hasLegalHold, err := strconv.ParseBool(val)
if err != nil {
- return ContainerGetPropertiesResponse{}, err
+ return containerClientGetPropertiesResponse{}, err
}
result.HasLegalHold = &hasLegalHold
}
@@ -681,30 +727,39 @@ func (client *containerClient) getPropertiesHandleResponse(resp *http.Response)
if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" {
denyEncryptionScopeOverride, err := strconv.ParseBool(val)
if err != nil {
- return ContainerGetPropertiesResponse{}, err
+ return containerClientGetPropertiesResponse{}, err
}
result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride
}
+ if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" {
+ isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val)
+ if err != nil {
+ return containerClientGetPropertiesResponse{}, err
+ }
+ result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled
+ }
return result, nil
}
// ListBlobFlatSegment - [Update] The List Blobs operation returns a list of the blobs under the specified container
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) ListBlobFlatSegment(options *ContainerListBlobFlatSegmentOptions) *ContainerListBlobFlatSegmentPager {
- return &ContainerListBlobFlatSegmentPager{
+// If the operation fails it returns an *azcore.ResponseError type.
+// options - containerClientListBlobFlatSegmentOptions contains the optional parameters for the containerClient.ListBlobFlatSegment
+// method.
+func (client *containerClient) ListBlobFlatSegment(options *containerClientListBlobFlatSegmentOptions) *containerClientListBlobFlatSegmentPager {
+ return &containerClientListBlobFlatSegmentPager{
client: client,
requester: func(ctx context.Context) (*policy.Request, error) {
return client.listBlobFlatSegmentCreateRequest(ctx, options)
},
- advancer: func(ctx context.Context, resp ContainerListBlobFlatSegmentResponse) (*policy.Request, error) {
+ advancer: func(ctx context.Context, resp containerClientListBlobFlatSegmentResponse) (*policy.Request, error) {
return runtime.NewRequest(ctx, http.MethodGet, *resp.ListBlobsFlatSegmentResponse.NextMarker)
},
}
}
// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request.
-func (client *containerClient) listBlobFlatSegmentCreateRequest(ctx context.Context, options *ContainerListBlobFlatSegmentOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *containerClient) listBlobFlatSegmentCreateRequest(ctx context.Context, options *containerClientListBlobFlatSegmentOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
@@ -727,7 +782,7 @@ func (client *containerClient) listBlobFlatSegmentCreateRequest(ctx context.Cont
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
@@ -736,8 +791,8 @@ func (client *containerClient) listBlobFlatSegmentCreateRequest(ctx context.Cont
}
// listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response.
-func (client *containerClient) listBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerListBlobFlatSegmentResponse, error) {
- result := ContainerListBlobFlatSegmentResponse{RawResponse: resp}
+func (client *containerClient) listBlobFlatSegmentHandleResponse(resp *http.Response) (containerClientListBlobFlatSegmentResponse, error) {
+ result := containerClientListBlobFlatSegmentResponse{RawResponse: resp}
if val := resp.Header.Get("Content-Type"); val != "" {
result.ContentType = &val
}
@@ -753,33 +808,38 @@ func (client *containerClient) listBlobFlatSegmentHandleResponse(resp *http.Resp
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerListBlobFlatSegmentResponse{}, err
+ return containerClientListBlobFlatSegmentResponse{}, err
}
result.Date = &date
}
if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil {
- return ContainerListBlobFlatSegmentResponse{}, err
+ return containerClientListBlobFlatSegmentResponse{}, err
}
return result, nil
}
// ListBlobHierarchySegment - [Update] The List Blobs operation returns a list of the blobs under the specified container
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) ListBlobHierarchySegment(delimiter string, options *ContainerListBlobHierarchySegmentOptions) *ContainerListBlobHierarchySegmentPager {
- return &ContainerListBlobHierarchySegmentPager{
+// If the operation fails it returns an *azcore.ResponseError type.
+// delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that
+// acts as a placeholder for all blobs whose names begin with the same substring up to the
+// appearance of the delimiter character. The delimiter may be a single character or a string.
+// options - containerClientListBlobHierarchySegmentOptions contains the optional parameters for the containerClient.ListBlobHierarchySegment
+// method.
+func (client *containerClient) ListBlobHierarchySegment(delimiter string, options *containerClientListBlobHierarchySegmentOptions) *containerClientListBlobHierarchySegmentPager {
+ return &containerClientListBlobHierarchySegmentPager{
client: client,
requester: func(ctx context.Context) (*policy.Request, error) {
return client.listBlobHierarchySegmentCreateRequest(ctx, delimiter, options)
},
- advancer: func(ctx context.Context, resp ContainerListBlobHierarchySegmentResponse) (*policy.Request, error) {
+ advancer: func(ctx context.Context, resp containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) {
return runtime.NewRequest(ctx, http.MethodGet, *resp.ListBlobsHierarchySegmentResponse.NextMarker)
},
}
}
// listBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request.
-func (client *containerClient) listBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *ContainerListBlobHierarchySegmentOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *containerClient) listBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *containerClientListBlobHierarchySegmentOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
@@ -803,7 +863,7 @@ func (client *containerClient) listBlobHierarchySegmentCreateRequest(ctx context
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
@@ -812,8 +872,8 @@ func (client *containerClient) listBlobHierarchySegmentCreateRequest(ctx context
}
// listBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response.
-func (client *containerClient) listBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerListBlobHierarchySegmentResponse, error) {
- result := ContainerListBlobHierarchySegmentResponse{RawResponse: resp}
+func (client *containerClient) listBlobHierarchySegmentHandleResponse(resp *http.Response) (containerClientListBlobHierarchySegmentResponse, error) {
+ result := containerClientListBlobHierarchySegmentResponse{RawResponse: resp}
if val := resp.Header.Get("Content-Type"); val != "" {
result.ContentType = &val
}
@@ -829,44 +889,49 @@ func (client *containerClient) listBlobHierarchySegmentHandleResponse(resp *http
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerListBlobHierarchySegmentResponse{}, err
+ return containerClientListBlobHierarchySegmentResponse{}, err
}
result.Date = &date
}
if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil {
- return ContainerListBlobHierarchySegmentResponse{}, err
+ return containerClientListBlobHierarchySegmentResponse{}, err
}
return result, nil
}
-// ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) ReleaseLease(ctx context.Context, leaseID string, containerReleaseLeaseOptions *ContainerReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerReleaseLeaseResponse, error) {
- req, err := client.releaseLeaseCreateRequest(ctx, leaseID, containerReleaseLeaseOptions, modifiedAccessConditions)
+// ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15
+// to 60 seconds, or can be infinite
+// If the operation fails it returns an *azcore.ResponseError type.
+// leaseID - Specifies the current lease ID on the resource.
+// containerClientReleaseLeaseOptions - containerClientReleaseLeaseOptions contains the optional parameters for the containerClient.ReleaseLease
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *containerClient) ReleaseLease(ctx context.Context, leaseID string, containerClientReleaseLeaseOptions *containerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientReleaseLeaseResponse, error) {
+ req, err := client.releaseLeaseCreateRequest(ctx, leaseID, containerClientReleaseLeaseOptions, modifiedAccessConditions)
if err != nil {
- return ContainerReleaseLeaseResponse{}, err
+ return containerClientReleaseLeaseResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerReleaseLeaseResponse{}, err
+ return containerClientReleaseLeaseResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ContainerReleaseLeaseResponse{}, runtime.NewResponseError(resp)
+ return containerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp)
}
return client.releaseLeaseHandleResponse(resp)
}
// releaseLeaseCreateRequest creates the ReleaseLease request.
-func (client *containerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, containerReleaseLeaseOptions *ContainerReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *containerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, containerClientReleaseLeaseOptions *containerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "lease")
reqQP.Set("restype", "container")
- if containerReleaseLeaseOptions != nil && containerReleaseLeaseOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*containerReleaseLeaseOptions.Timeout), 10))
+ if containerClientReleaseLeaseOptions != nil && containerClientReleaseLeaseOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientReleaseLeaseOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-lease-action", "release")
@@ -877,24 +942,24 @@ func (client *containerClient) releaseLeaseCreateRequest(ctx context.Context, le
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if containerReleaseLeaseOptions != nil && containerReleaseLeaseOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *containerReleaseLeaseOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if containerClientReleaseLeaseOptions != nil && containerClientReleaseLeaseOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *containerClientReleaseLeaseOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// releaseLeaseHandleResponse handles the ReleaseLease response.
-func (client *containerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerReleaseLeaseResponse, error) {
- result := ContainerReleaseLeaseResponse{RawResponse: resp}
+func (client *containerClient) releaseLeaseHandleResponse(resp *http.Response) (containerClientReleaseLeaseResponse, error) {
+ result := containerClientReleaseLeaseResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerReleaseLeaseResponse{}, err
+ return containerClientReleaseLeaseResponse{}, err
}
result.LastModified = &lastModified
}
@@ -910,41 +975,112 @@ func (client *containerClient) releaseLeaseHandleResponse(resp *http.Response) (
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerReleaseLeaseResponse{}, err
+ return containerClientReleaseLeaseResponse{}, err
+ }
+ result.Date = &date
+ }
+ return result, nil
+}
+
+// Rename - Renames an existing container.
+// If the operation fails it returns an *azcore.ResponseError type.
+// sourceContainerName - Required. Specifies the name of the container to rename.
+// options - containerClientRenameOptions contains the optional parameters for the containerClient.Rename method.
+func (client *containerClient) Rename(ctx context.Context, sourceContainerName string, options *containerClientRenameOptions) (containerClientRenameResponse, error) {
+ req, err := client.renameCreateRequest(ctx, sourceContainerName, options)
+ if err != nil {
+ return containerClientRenameResponse{}, err
+ }
+ resp, err := client.pl.Do(req)
+ if err != nil {
+ return containerClientRenameResponse{}, err
+ }
+ if !runtime.HasStatusCode(resp, http.StatusOK) {
+ return containerClientRenameResponse{}, runtime.NewResponseError(resp)
+ }
+ return client.renameHandleResponse(resp)
+}
+
+// renameCreateRequest creates the Rename request.
+func (client *containerClient) renameCreateRequest(ctx context.Context, sourceContainerName string, options *containerClientRenameOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("restype", "container")
+ reqQP.Set("comp", "rename")
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
+ }
+ req.Raw().Header.Set("x-ms-source-container-name", sourceContainerName)
+ if options != nil && options.SourceLeaseID != nil {
+ req.Raw().Header.Set("x-ms-source-lease-id", *options.SourceLeaseID)
+ }
+ req.Raw().Header.Set("Accept", "application/xml")
+ return req, nil
+}
+
+// renameHandleResponse handles the Rename response.
+func (client *containerClient) renameHandleResponse(resp *http.Response) (containerClientRenameResponse, error) {
+ result := containerClientRenameResponse{RawResponse: resp}
+ if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+ result.ClientRequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-request-id"); val != "" {
+ result.RequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-version"); val != "" {
+ result.Version = &val
+ }
+ if val := resp.Header.Get("Date"); val != "" {
+ date, err := time.Parse(time.RFC1123, val)
+ if err != nil {
+ return containerClientRenameResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 to 60 seconds, or can be infinite
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) RenewLease(ctx context.Context, leaseID string, containerRenewLeaseOptions *ContainerRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerRenewLeaseResponse, error) {
- req, err := client.renewLeaseCreateRequest(ctx, leaseID, containerRenewLeaseOptions, modifiedAccessConditions)
+// RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15
+// to 60 seconds, or can be infinite
+// If the operation fails it returns an *azcore.ResponseError type.
+// leaseID - Specifies the current lease ID on the resource.
+// containerClientRenewLeaseOptions - containerClientRenewLeaseOptions contains the optional parameters for the containerClient.RenewLease
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *containerClient) RenewLease(ctx context.Context, leaseID string, containerClientRenewLeaseOptions *containerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientRenewLeaseResponse, error) {
+ req, err := client.renewLeaseCreateRequest(ctx, leaseID, containerClientRenewLeaseOptions, modifiedAccessConditions)
if err != nil {
- return ContainerRenewLeaseResponse{}, err
+ return containerClientRenewLeaseResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerRenewLeaseResponse{}, err
+ return containerClientRenewLeaseResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ContainerRenewLeaseResponse{}, runtime.NewResponseError(resp)
+ return containerClientRenewLeaseResponse{}, runtime.NewResponseError(resp)
}
return client.renewLeaseHandleResponse(resp)
}
// renewLeaseCreateRequest creates the RenewLease request.
-func (client *containerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, containerRenewLeaseOptions *ContainerRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *containerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, containerClientRenewLeaseOptions *containerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "lease")
reqQP.Set("restype", "container")
- if containerRenewLeaseOptions != nil && containerRenewLeaseOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*containerRenewLeaseOptions.Timeout), 10))
+ if containerClientRenewLeaseOptions != nil && containerClientRenewLeaseOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientRenewLeaseOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-lease-action", "renew")
@@ -955,24 +1091,24 @@ func (client *containerClient) renewLeaseCreateRequest(ctx context.Context, leas
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if containerRenewLeaseOptions != nil && containerRenewLeaseOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *containerRenewLeaseOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if containerClientRenewLeaseOptions != nil && containerClientRenewLeaseOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *containerClientRenewLeaseOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// renewLeaseHandleResponse handles the RenewLease response.
-func (client *containerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerRenewLeaseResponse, error) {
- result := ContainerRenewLeaseResponse{RawResponse: resp}
+func (client *containerClient) renewLeaseHandleResponse(resp *http.Response) (containerClientRenewLeaseResponse, error) {
+ result := containerClientRenewLeaseResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerRenewLeaseResponse{}, err
+ return containerClientRenewLeaseResponse{}, err
}
result.LastModified = &lastModified
}
@@ -991,7 +1127,7 @@ func (client *containerClient) renewLeaseHandleResponse(resp *http.Response) (Co
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerRenewLeaseResponse{}, err
+ return containerClientRenewLeaseResponse{}, err
}
result.Date = &date
}
@@ -999,25 +1135,26 @@ func (client *containerClient) renewLeaseHandleResponse(resp *http.Response) (Co
}
// Restore - Restores a previously-deleted container.
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) Restore(ctx context.Context, options *ContainerRestoreOptions) (ContainerRestoreResponse, error) {
+// If the operation fails it returns an *azcore.ResponseError type.
+// options - containerClientRestoreOptions contains the optional parameters for the containerClient.Restore method.
+func (client *containerClient) Restore(ctx context.Context, options *containerClientRestoreOptions) (containerClientRestoreResponse, error) {
req, err := client.restoreCreateRequest(ctx, options)
if err != nil {
- return ContainerRestoreResponse{}, err
+ return containerClientRestoreResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerRestoreResponse{}, err
+ return containerClientRestoreResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return ContainerRestoreResponse{}, runtime.NewResponseError(resp)
+ return containerClientRestoreResponse{}, runtime.NewResponseError(resp)
}
return client.restoreHandleResponse(resp)
}
// restoreCreateRequest creates the Restore request.
-func (client *containerClient) restoreCreateRequest(ctx context.Context, options *ContainerRestoreOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *containerClient) restoreCreateRequest(ctx context.Context, options *containerClientRestoreOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
@@ -1028,7 +1165,7 @@ func (client *containerClient) restoreCreateRequest(ctx context.Context, options
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
@@ -1043,8 +1180,8 @@ func (client *containerClient) restoreCreateRequest(ctx context.Context, options
}
// restoreHandleResponse handles the Restore response.
-func (client *containerClient) restoreHandleResponse(resp *http.Response) (ContainerRestoreResponse, error) {
- result := ContainerRestoreResponse{RawResponse: resp}
+func (client *containerClient) restoreHandleResponse(resp *http.Response) (containerClientRestoreResponse, error) {
+ result := containerClientRestoreResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -1057,48 +1194,53 @@ func (client *containerClient) restoreHandleResponse(resp *http.Response) (Conta
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerRestoreResponse{}, err
+ return containerClientRestoreResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container may be accessed publicly.
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) SetAccessPolicy(ctx context.Context, containerSetAccessPolicyOptions *ContainerSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerSetAccessPolicyResponse, error) {
- req, err := client.setAccessPolicyCreateRequest(ctx, containerSetAccessPolicyOptions, leaseAccessConditions, modifiedAccessConditions)
+// SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container
+// may be accessed publicly.
+// If the operation fails it returns an *azcore.ResponseError type.
+// containerClientSetAccessPolicyOptions - containerClientSetAccessPolicyOptions contains the optional parameters for the
+// containerClient.SetAccessPolicy method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *containerClient) SetAccessPolicy(ctx context.Context, containerClientSetAccessPolicyOptions *containerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientSetAccessPolicyResponse, error) {
+ req, err := client.setAccessPolicyCreateRequest(ctx, containerClientSetAccessPolicyOptions, leaseAccessConditions, modifiedAccessConditions)
if err != nil {
- return ContainerSetAccessPolicyResponse{}, err
+ return containerClientSetAccessPolicyResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerSetAccessPolicyResponse{}, err
+ return containerClientSetAccessPolicyResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ContainerSetAccessPolicyResponse{}, runtime.NewResponseError(resp)
+ return containerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp)
}
return client.setAccessPolicyHandleResponse(resp)
}
// setAccessPolicyCreateRequest creates the SetAccessPolicy request.
-func (client *containerClient) setAccessPolicyCreateRequest(ctx context.Context, containerSetAccessPolicyOptions *ContainerSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *containerClient) setAccessPolicyCreateRequest(ctx context.Context, containerClientSetAccessPolicyOptions *containerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("restype", "container")
reqQP.Set("comp", "acl")
- if containerSetAccessPolicyOptions != nil && containerSetAccessPolicyOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*containerSetAccessPolicyOptions.Timeout), 10))
+ if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientSetAccessPolicyOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
}
- if containerSetAccessPolicyOptions != nil && containerSetAccessPolicyOptions.Access != nil {
- req.Raw().Header.Set("x-ms-blob-public-access", string(*containerSetAccessPolicyOptions.Access))
+ if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.Access != nil {
+ req.Raw().Header.Set("x-ms-blob-public-access", string(*containerClientSetAccessPolicyOptions.Access))
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
@@ -1106,31 +1248,31 @@ func (client *containerClient) setAccessPolicyCreateRequest(ctx context.Context,
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if containerSetAccessPolicyOptions != nil && containerSetAccessPolicyOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *containerSetAccessPolicyOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *containerClientSetAccessPolicyOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
type wrapper struct {
XMLName xml.Name `xml:"SignedIdentifiers"`
ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"`
}
- if containerSetAccessPolicyOptions != nil && containerSetAccessPolicyOptions.ContainerACL != nil {
- return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerSetAccessPolicyOptions.ContainerACL})
+ if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.ContainerACL != nil {
+ return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerClientSetAccessPolicyOptions.ContainerACL})
}
return req, nil
}
// setAccessPolicyHandleResponse handles the SetAccessPolicy response.
-func (client *containerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerSetAccessPolicyResponse, error) {
- result := ContainerSetAccessPolicyResponse{RawResponse: resp}
+func (client *containerClient) setAccessPolicyHandleResponse(resp *http.Response) (containerClientSetAccessPolicyResponse, error) {
+ result := containerClientSetAccessPolicyResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerSetAccessPolicyResponse{}, err
+ return containerClientSetAccessPolicyResponse{}, err
}
result.LastModified = &lastModified
}
@@ -1146,7 +1288,7 @@ func (client *containerClient) setAccessPolicyHandleResponse(resp *http.Response
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerSetAccessPolicyResponse{}, err
+ return containerClientSetAccessPolicyResponse{}, err
}
result.Date = &date
}
@@ -1154,64 +1296,68 @@ func (client *containerClient) setAccessPolicyHandleResponse(resp *http.Response
}
// SetMetadata - operation sets one or more user-defined name-value pairs for the specified container.
-// If the operation fails it returns the *StorageError error type.
-func (client *containerClient) SetMetadata(ctx context.Context, containerSetMetadataOptions *ContainerSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerSetMetadataResponse, error) {
- req, err := client.setMetadataCreateRequest(ctx, containerSetMetadataOptions, leaseAccessConditions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// containerClientSetMetadataOptions - containerClientSetMetadataOptions contains the optional parameters for the containerClient.SetMetadata
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *containerClient) SetMetadata(ctx context.Context, containerClientSetMetadataOptions *containerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientSetMetadataResponse, error) {
+ req, err := client.setMetadataCreateRequest(ctx, containerClientSetMetadataOptions, leaseAccessConditions, modifiedAccessConditions)
if err != nil {
- return ContainerSetMetadataResponse{}, err
+ return containerClientSetMetadataResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ContainerSetMetadataResponse{}, err
+ return containerClientSetMetadataResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ContainerSetMetadataResponse{}, runtime.NewResponseError(resp)
+ return containerClientSetMetadataResponse{}, runtime.NewResponseError(resp)
}
return client.setMetadataHandleResponse(resp)
}
// setMetadataCreateRequest creates the SetMetadata request.
-func (client *containerClient) setMetadataCreateRequest(ctx context.Context, containerSetMetadataOptions *ContainerSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *containerClient) setMetadataCreateRequest(ctx context.Context, containerClientSetMetadataOptions *containerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("restype", "container")
reqQP.Set("comp", "metadata")
- if containerSetMetadataOptions != nil && containerSetMetadataOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*containerSetMetadataOptions.Timeout), 10))
+ if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientSetMetadataOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
}
- if containerSetMetadataOptions != nil && containerSetMetadataOptions.Metadata != nil {
- for k, v := range containerSetMetadataOptions.Metadata {
+ if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.Metadata != nil {
+ for k, v := range containerClientSetMetadataOptions.Metadata {
req.Raw().Header.Set("x-ms-meta-"+k, v)
}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if containerSetMetadataOptions != nil && containerSetMetadataOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *containerSetMetadataOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *containerClientSetMetadataOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// setMetadataHandleResponse handles the SetMetadata response.
-func (client *containerClient) setMetadataHandleResponse(resp *http.Response) (ContainerSetMetadataResponse, error) {
- result := ContainerSetMetadataResponse{RawResponse: resp}
+func (client *containerClient) setMetadataHandleResponse(resp *http.Response) (containerClientSetMetadataResponse, error) {
+ result := containerClientSetMetadataResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerSetMetadataResponse{}, err
+ return containerClientSetMetadataResponse{}, err
}
result.LastModified = &lastModified
}
@@ -1227,9 +1373,70 @@ func (client *containerClient) setMetadataHandleResponse(resp *http.Response) (C
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ContainerSetMetadataResponse{}, err
+ return containerClientSetMetadataResponse{}, err
}
result.Date = &date
}
return result, nil
}
+
+// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request.
+// If the operation fails it returns an *azcore.ResponseError type.
+// contentLength - The length of the request.
+// multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header
+// value: multipart/mixed; boundary=batch_
+// body - Initial data
+// options - containerClientSubmitBatchOptions contains the optional parameters for the containerClient.SubmitBatch method.
+func (client *containerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *containerClientSubmitBatchOptions) (containerClientSubmitBatchResponse, error) {
+ req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options)
+ if err != nil {
+ return containerClientSubmitBatchResponse{}, err
+ }
+ resp, err := client.pl.Do(req)
+ if err != nil {
+ return containerClientSubmitBatchResponse{}, err
+ }
+ if !runtime.HasStatusCode(resp, http.StatusAccepted) {
+ return containerClientSubmitBatchResponse{}, runtime.NewResponseError(resp)
+ }
+ return client.submitBatchHandleResponse(resp)
+}
+
+// submitBatchCreateRequest creates the SubmitBatch request.
+func (client *containerClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *containerClientSubmitBatchOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint)
+ if err != nil {
+ return nil, err
+ }
+ reqQP := req.Raw().URL.Query()
+ reqQP.Set("restype", "container")
+ reqQP.Set("comp", "batch")
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ runtime.SkipBodyDownload(req)
+ req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
+ req.Raw().Header.Set("Content-Type", multipartContentType)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
+ }
+ req.Raw().Header.Set("Accept", "application/xml")
+ return req, runtime.MarshalAsXML(req, body)
+}
+
+// submitBatchHandleResponse handles the SubmitBatch response.
+func (client *containerClient) submitBatchHandleResponse(resp *http.Response) (containerClientSubmitBatchResponse, error) {
+ result := containerClientSubmitBatchResponse{RawResponse: resp}
+ if val := resp.Header.Get("Content-Type"); val != "" {
+ result.ContentType = &val
+ }
+ if val := resp.Header.Get("x-ms-request-id"); val != "" {
+ result.RequestID = &val
+ }
+ if val := resp.Header.Get("x-ms-version"); val != "" {
+ result.Version = &val
+ }
+ return result, nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_directory_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_directory_client.go
deleted file mode 100644
index c3ce389d81b..00000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_directory_client.go
+++ /dev/null
@@ -1,555 +0,0 @@
-//go:build go1.16
-// +build go1.16
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-package azblob
-
-import (
- "context"
- "net/http"
- "strconv"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
-)
-
-type directoryClient struct {
- con *connection
- pathRenameMode *PathRenameMode
-}
-
-// Create - Create a directory. By default, the destination is overwritten and if the destination already exists and has a lease the lease is broken. This
-// operation supports conditional HTTP requests. For more
-// information, see Specifying Conditional Headers for Blob Service Operations [https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations].
-// To
-// fail if the destination already exists, use a conditional request with If-None-Match: "*".
-// If the operation fails it returns the *DataLakeStorageError error type.
-func (client *directoryClient) Create(ctx context.Context, directoryCreateOptions *DirectoryCreateOptions, directoryHTTPHeaders *DirectoryHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (DirectoryCreateResponse, error) {
- req, err := client.createCreateRequest(ctx, directoryCreateOptions, directoryHTTPHeaders, leaseAccessConditions, modifiedAccessConditions)
- if err != nil {
- return DirectoryCreateResponse{}, err
- }
- resp, err := client.con.Pipeline().Do(req)
- if err != nil {
- return DirectoryCreateResponse{}, err
- }
- if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return DirectoryCreateResponse{}, runtime.NewResponseError(resp)
- }
- return client.createHandleResponse(resp)
-}
-
-// createCreateRequest creates the Create request.
-func (client *directoryClient) createCreateRequest(ctx context.Context, directoryCreateOptions *DirectoryCreateOptions, directoryHTTPHeaders *DirectoryHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
- if err != nil {
- return nil, err
- }
- reqQP := req.Raw().URL.Query()
- reqQP.Set("resource", "directory")
- if directoryCreateOptions != nil && directoryCreateOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*directoryCreateOptions.Timeout), 10))
- }
- req.Raw().URL.RawQuery = reqQP.Encode()
- if directoryCreateOptions != nil && directoryCreateOptions.DirectoryProperties != nil {
- req.Raw().Header.Set("x-ms-properties", *directoryCreateOptions.DirectoryProperties)
- }
- if directoryCreateOptions != nil && directoryCreateOptions.PosixPermissions != nil {
- req.Raw().Header.Set("x-ms-permissions", *directoryCreateOptions.PosixPermissions)
- }
- if directoryCreateOptions != nil && directoryCreateOptions.PosixUmask != nil {
- req.Raw().Header.Set("x-ms-umask", *directoryCreateOptions.PosixUmask)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.CacheControl != nil {
- req.Raw().Header.Set("x-ms-cache-control", *directoryHTTPHeaders.CacheControl)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.ContentType != nil {
- req.Raw().Header.Set("x-ms-content-type", *directoryHTTPHeaders.ContentType)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.ContentEncoding != nil {
- req.Raw().Header.Set("x-ms-content-encoding", *directoryHTTPHeaders.ContentEncoding)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.ContentLanguage != nil {
- req.Raw().Header.Set("x-ms-content-language", *directoryHTTPHeaders.ContentLanguage)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.ContentDisposition != nil {
- req.Raw().Header.Set("x-ms-content-disposition", *directoryHTTPHeaders.ContentDisposition)
- }
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
- }
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if directoryCreateOptions != nil && directoryCreateOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *directoryCreateOptions.RequestID)
- }
- req.Raw().Header.Set("Accept", "application/xml")
- return req, nil
-}
-
-// createHandleResponse handles the Create response.
-func (client *directoryClient) createHandleResponse(resp *http.Response) (DirectoryCreateResponse, error) {
- result := DirectoryCreateResponse{RawResponse: resp}
- if val := resp.Header.Get("ETag"); val != "" {
- result.ETag = &val
- }
- if val := resp.Header.Get("Last-Modified"); val != "" {
- lastModified, err := time.Parse(time.RFC1123, val)
- if err != nil {
- return DirectoryCreateResponse{}, err
- }
- result.LastModified = &lastModified
- }
- if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
- result.ClientRequestID = &val
- }
- if val := resp.Header.Get("x-ms-request-id"); val != "" {
- result.RequestID = &val
- }
- if val := resp.Header.Get("x-ms-version"); val != "" {
- result.Version = &val
- }
- if val := resp.Header.Get("Content-Length"); val != "" {
- contentLength, err := strconv.ParseInt(val, 10, 64)
- if err != nil {
- return DirectoryCreateResponse{}, err
- }
- result.ContentLength = &contentLength
- }
- if val := resp.Header.Get("Date"); val != "" {
- date, err := time.Parse(time.RFC1123, val)
- if err != nil {
- return DirectoryCreateResponse{}, err
- }
- result.Date = &date
- }
- return result, nil
-}
-
-// Delete - Deletes the directory
-// If the operation fails it returns the *DataLakeStorageError error type.
-func (client *directoryClient) Delete(ctx context.Context, recursiveDirectoryDelete bool, directoryDeleteOptions *DirectoryDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (DirectoryDeleteResponse, error) {
- req, err := client.deleteCreateRequest(ctx, recursiveDirectoryDelete, directoryDeleteOptions, leaseAccessConditions, modifiedAccessConditions)
- if err != nil {
- return DirectoryDeleteResponse{}, err
- }
- resp, err := client.con.Pipeline().Do(req)
- if err != nil {
- return DirectoryDeleteResponse{}, err
- }
- if !runtime.HasStatusCode(resp, http.StatusOK) {
- return DirectoryDeleteResponse{}, runtime.NewResponseError(resp)
- }
- return client.deleteHandleResponse(resp)
-}
-
-// deleteCreateRequest creates the Delete request.
-func (client *directoryClient) deleteCreateRequest(ctx context.Context, recursiveDirectoryDelete bool, directoryDeleteOptions *DirectoryDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodDelete, client.con.Endpoint())
- if err != nil {
- return nil, err
- }
- reqQP := req.Raw().URL.Query()
- if directoryDeleteOptions != nil && directoryDeleteOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*directoryDeleteOptions.Timeout), 10))
- }
- reqQP.Set("recursive", strconv.FormatBool(recursiveDirectoryDelete))
- if directoryDeleteOptions != nil && directoryDeleteOptions.Marker != nil {
- reqQP.Set("continuation", *directoryDeleteOptions.Marker)
- }
- req.Raw().URL.RawQuery = reqQP.Encode()
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
- }
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if directoryDeleteOptions != nil && directoryDeleteOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *directoryDeleteOptions.RequestID)
- }
- req.Raw().Header.Set("Accept", "application/xml")
- return req, nil
-}
-
-// deleteHandleResponse handles the Delete response.
-func (client *directoryClient) deleteHandleResponse(resp *http.Response) (DirectoryDeleteResponse, error) {
- result := DirectoryDeleteResponse{RawResponse: resp}
- if val := resp.Header.Get("x-ms-continuation"); val != "" {
- result.Marker = &val
- }
- if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
- result.ClientRequestID = &val
- }
- if val := resp.Header.Get("x-ms-request-id"); val != "" {
- result.RequestID = &val
- }
- if val := resp.Header.Get("x-ms-version"); val != "" {
- result.Version = &val
- }
- if val := resp.Header.Get("Date"); val != "" {
- date, err := time.Parse(time.RFC1123, val)
- if err != nil {
- return DirectoryDeleteResponse{}, err
- }
- result.Date = &date
- }
- return result, nil
-}
-
-// GetAccessControl - Get the owner, group, permissions, or access control list for a directory.
-// If the operation fails it returns the *DataLakeStorageError error type.
-func (client *directoryClient) GetAccessControl(ctx context.Context, directoryGetAccessControlOptions *DirectoryGetAccessControlOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (DirectoryGetAccessControlResponse, error) {
- req, err := client.getAccessControlCreateRequest(ctx, directoryGetAccessControlOptions, leaseAccessConditions, modifiedAccessConditions)
- if err != nil {
- return DirectoryGetAccessControlResponse{}, err
- }
- resp, err := client.con.Pipeline().Do(req)
- if err != nil {
- return DirectoryGetAccessControlResponse{}, err
- }
- if !runtime.HasStatusCode(resp, http.StatusOK) {
- return DirectoryGetAccessControlResponse{}, runtime.NewResponseError(resp)
- }
- return client.getAccessControlHandleResponse(resp)
-}
-
-// getAccessControlCreateRequest creates the GetAccessControl request.
-func (client *directoryClient) getAccessControlCreateRequest(ctx context.Context, directoryGetAccessControlOptions *DirectoryGetAccessControlOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodHead, client.con.Endpoint())
- if err != nil {
- return nil, err
- }
- reqQP := req.Raw().URL.Query()
- reqQP.Set("action", "getAccessControl")
- if directoryGetAccessControlOptions != nil && directoryGetAccessControlOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*directoryGetAccessControlOptions.Timeout), 10))
- }
- if directoryGetAccessControlOptions != nil && directoryGetAccessControlOptions.Upn != nil {
- reqQP.Set("upn", strconv.FormatBool(*directoryGetAccessControlOptions.Upn))
- }
- req.Raw().URL.RawQuery = reqQP.Encode()
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
- }
- if directoryGetAccessControlOptions != nil && directoryGetAccessControlOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *directoryGetAccessControlOptions.RequestID)
- }
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- req.Raw().Header.Set("Accept", "application/xml")
- return req, nil
-}
-
-// getAccessControlHandleResponse handles the GetAccessControl response.
-func (client *directoryClient) getAccessControlHandleResponse(resp *http.Response) (DirectoryGetAccessControlResponse, error) {
- result := DirectoryGetAccessControlResponse{RawResponse: resp}
- if val := resp.Header.Get("Date"); val != "" {
- date, err := time.Parse(time.RFC1123, val)
- if err != nil {
- return DirectoryGetAccessControlResponse{}, err
- }
- result.Date = &date
- }
- if val := resp.Header.Get("ETag"); val != "" {
- result.ETag = &val
- }
- if val := resp.Header.Get("Last-Modified"); val != "" {
- lastModified, err := time.Parse(time.RFC1123, val)
- if err != nil {
- return DirectoryGetAccessControlResponse{}, err
- }
- result.LastModified = &lastModified
- }
- if val := resp.Header.Get("x-ms-owner"); val != "" {
- result.XMSOwner = &val
- }
- if val := resp.Header.Get("x-ms-group"); val != "" {
- result.XMSGroup = &val
- }
- if val := resp.Header.Get("x-ms-permissions"); val != "" {
- result.XMSPermissions = &val
- }
- if val := resp.Header.Get("x-ms-acl"); val != "" {
- result.XMSACL = &val
- }
- if val := resp.Header.Get("x-ms-request-id"); val != "" {
- result.RequestID = &val
- }
- if val := resp.Header.Get("x-ms-version"); val != "" {
- result.Version = &val
- }
- return result, nil
-}
-
-// Rename - Rename a directory. By default, the destination is overwritten and if the destination already exists and has a lease the lease is broken. This
-// operation supports conditional HTTP requests. For more
-// information, see Specifying Conditional Headers for Blob Service Operations [https://docs.microsoft.com/en-us/rest/api/storageservices/specifying-conditional-headers-for-blob-service-operations].
-// To
-// fail if the destination already exists, use a conditional request with If-None-Match: "*".
-// If the operation fails it returns the *DataLakeStorageError error type.
-func (client *directoryClient) Rename(ctx context.Context, renameSource string, directoryRenameOptions *DirectoryRenameOptions, directoryHTTPHeaders *DirectoryHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (DirectoryRenameResponse, error) {
- req, err := client.renameCreateRequest(ctx, renameSource, directoryRenameOptions, directoryHTTPHeaders, leaseAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
- if err != nil {
- return DirectoryRenameResponse{}, err
- }
- resp, err := client.con.Pipeline().Do(req)
- if err != nil {
- return DirectoryRenameResponse{}, err
- }
- if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return DirectoryRenameResponse{}, runtime.NewResponseError(resp)
- }
- return client.renameHandleResponse(resp)
-}
-
-// renameCreateRequest creates the Rename request.
-func (client *directoryClient) renameCreateRequest(ctx context.Context, renameSource string, directoryRenameOptions *DirectoryRenameOptions, directoryHTTPHeaders *DirectoryHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
- if err != nil {
- return nil, err
- }
- reqQP := req.Raw().URL.Query()
- if directoryRenameOptions != nil && directoryRenameOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*directoryRenameOptions.Timeout), 10))
- }
- if directoryRenameOptions != nil && directoryRenameOptions.Marker != nil {
- reqQP.Set("continuation", *directoryRenameOptions.Marker)
- }
- if client.pathRenameMode != nil {
- reqQP.Set("mode", string(*client.pathRenameMode))
- }
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-rename-source", renameSource)
- if directoryRenameOptions != nil && directoryRenameOptions.DirectoryProperties != nil {
- req.Raw().Header.Set("x-ms-properties", *directoryRenameOptions.DirectoryProperties)
- }
- if directoryRenameOptions != nil && directoryRenameOptions.PosixPermissions != nil {
- req.Raw().Header.Set("x-ms-permissions", *directoryRenameOptions.PosixPermissions)
- }
- if directoryRenameOptions != nil && directoryRenameOptions.PosixUmask != nil {
- req.Raw().Header.Set("x-ms-umask", *directoryRenameOptions.PosixUmask)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.CacheControl != nil {
- req.Raw().Header.Set("x-ms-cache-control", *directoryHTTPHeaders.CacheControl)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.ContentType != nil {
- req.Raw().Header.Set("x-ms-content-type", *directoryHTTPHeaders.ContentType)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.ContentEncoding != nil {
- req.Raw().Header.Set("x-ms-content-encoding", *directoryHTTPHeaders.ContentEncoding)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.ContentLanguage != nil {
- req.Raw().Header.Set("x-ms-content-language", *directoryHTTPHeaders.ContentLanguage)
- }
- if directoryHTTPHeaders != nil && directoryHTTPHeaders.ContentDisposition != nil {
- req.Raw().Header.Set("x-ms-content-disposition", *directoryHTTPHeaders.ContentDisposition)
- }
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
- }
- if directoryRenameOptions != nil && directoryRenameOptions.SourceLeaseID != nil {
- req.Raw().Header.Set("x-ms-source-lease-id", *directoryRenameOptions.SourceLeaseID)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
- }
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
- req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123))
- }
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
- req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123))
- }
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
- req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch)
- }
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
- req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch)
- }
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if directoryRenameOptions != nil && directoryRenameOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *directoryRenameOptions.RequestID)
- }
- req.Raw().Header.Set("Accept", "application/xml")
- return req, nil
-}
-
-// renameHandleResponse handles the Rename response.
-func (client *directoryClient) renameHandleResponse(resp *http.Response) (DirectoryRenameResponse, error) {
- result := DirectoryRenameResponse{RawResponse: resp}
- if val := resp.Header.Get("x-ms-continuation"); val != "" {
- result.Marker = &val
- }
- if val := resp.Header.Get("ETag"); val != "" {
- result.ETag = &val
- }
- if val := resp.Header.Get("Last-Modified"); val != "" {
- lastModified, err := time.Parse(time.RFC1123, val)
- if err != nil {
- return DirectoryRenameResponse{}, err
- }
- result.LastModified = &lastModified
- }
- if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
- result.ClientRequestID = &val
- }
- if val := resp.Header.Get("x-ms-request-id"); val != "" {
- result.RequestID = &val
- }
- if val := resp.Header.Get("x-ms-version"); val != "" {
- result.Version = &val
- }
- if val := resp.Header.Get("Content-Length"); val != "" {
- contentLength, err := strconv.ParseInt(val, 10, 64)
- if err != nil {
- return DirectoryRenameResponse{}, err
- }
- result.ContentLength = &contentLength
- }
- if val := resp.Header.Get("Date"); val != "" {
- date, err := time.Parse(time.RFC1123, val)
- if err != nil {
- return DirectoryRenameResponse{}, err
- }
- result.Date = &date
- }
- return result, nil
-}
-
-// SetAccessControl - Set the owner, group, permissions, or access control list for a directory.
-// If the operation fails it returns the *DataLakeStorageError error type.
-func (client *directoryClient) SetAccessControl(ctx context.Context, directorySetAccessControlOptions *DirectorySetAccessControlOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (DirectorySetAccessControlResponse, error) {
- req, err := client.setAccessControlCreateRequest(ctx, directorySetAccessControlOptions, leaseAccessConditions, modifiedAccessConditions)
- if err != nil {
- return DirectorySetAccessControlResponse{}, err
- }
- resp, err := client.con.Pipeline().Do(req)
- if err != nil {
- return DirectorySetAccessControlResponse{}, err
- }
- if !runtime.HasStatusCode(resp, http.StatusOK) {
- return DirectorySetAccessControlResponse{}, runtime.NewResponseError(resp)
- }
- return client.setAccessControlHandleResponse(resp)
-}
-
-// setAccessControlCreateRequest creates the SetAccessControl request.
-func (client *directoryClient) setAccessControlCreateRequest(ctx context.Context, directorySetAccessControlOptions *DirectorySetAccessControlOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPatch, client.con.Endpoint())
- if err != nil {
- return nil, err
- }
- reqQP := req.Raw().URL.Query()
- reqQP.Set("action", "setAccessControl")
- if directorySetAccessControlOptions != nil && directorySetAccessControlOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*directorySetAccessControlOptions.Timeout), 10))
- }
- req.Raw().URL.RawQuery = reqQP.Encode()
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
- }
- if directorySetAccessControlOptions != nil && directorySetAccessControlOptions.Owner != nil {
- req.Raw().Header.Set("x-ms-owner", *directorySetAccessControlOptions.Owner)
- }
- if directorySetAccessControlOptions != nil && directorySetAccessControlOptions.Group != nil {
- req.Raw().Header.Set("x-ms-group", *directorySetAccessControlOptions.Group)
- }
- if directorySetAccessControlOptions != nil && directorySetAccessControlOptions.PosixPermissions != nil {
- req.Raw().Header.Set("x-ms-permissions", *directorySetAccessControlOptions.PosixPermissions)
- }
- if directorySetAccessControlOptions != nil && directorySetAccessControlOptions.PosixACL != nil {
- req.Raw().Header.Set("x-ms-acl", *directorySetAccessControlOptions.PosixACL)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch)
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123))
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123))
- }
- if directorySetAccessControlOptions != nil && directorySetAccessControlOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *directorySetAccessControlOptions.RequestID)
- }
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- req.Raw().Header.Set("Accept", "application/xml")
- return req, nil
-}
-
-// setAccessControlHandleResponse handles the SetAccessControl response.
-func (client *directoryClient) setAccessControlHandleResponse(resp *http.Response) (DirectorySetAccessControlResponse, error) {
- result := DirectorySetAccessControlResponse{RawResponse: resp}
- if val := resp.Header.Get("Date"); val != "" {
- date, err := time.Parse(time.RFC1123, val)
- if err != nil {
- return DirectorySetAccessControlResponse{}, err
- }
- result.Date = &date
- }
- if val := resp.Header.Get("ETag"); val != "" {
- result.ETag = &val
- }
- if val := resp.Header.Get("Last-Modified"); val != "" {
- lastModified, err := time.Parse(time.RFC1123, val)
- if err != nil {
- return DirectorySetAccessControlResponse{}, err
- }
- result.LastModified = &lastModified
- }
- if val := resp.Header.Get("x-ms-request-id"); val != "" {
- result.RequestID = &val
- }
- if val := resp.Header.Get("x-ms-version"); val != "" {
- result.Version = &val
- }
- return result, nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go
index 2e3885fe78c..d40d63b1b0d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
@@ -58,197 +58,50 @@ func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) erro
return nil
}
-// AppendBlobAppendBlockFromURLOptions contains the optional parameters for the AppendBlob.AppendBlockFromURL method.
-type AppendBlobAppendBlockFromURLOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // Specify the md5 calculated for the range of bytes that must be read from the copy source.
- SourceContentMD5 []byte
- // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
- SourceContentcrc64 []byte
- // Bytes of source data in the specified range.
- SourceRange *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // Specify the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 []byte
-}
-
-// AppendBlobAppendBlockOptions contains the optional parameters for the AppendBlob.AppendBlock method.
-type AppendBlobAppendBlockOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // Specify the transactional crc64 for the body, to be validated by the service.
- TransactionalContentCRC64 []byte
- // Specify the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 []byte
-}
-
-// AppendBlobCreateOptions contains the optional parameters for the AppendBlob.Create method.
-type AppendBlobCreateOptions struct {
- // Optional. Used to set blob tags in various blob operations.
- BlobTagsString *string
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata
- // from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
- // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
- // rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// AppendBlobSealOptions contains the optional parameters for the AppendBlob.Seal method.
-type AppendBlobSealOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// AppendPositionAccessConditions contains a group of parameters for the AppendBlob.AppendBlock method.
+// AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock method.
type AppendPositionAccessConditions struct {
- // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. Append Block will succeed only
- // if the append position is equal to this number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412
- // - Precondition Failed).
+ // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare.
+ // Append Block will succeed only if the append position is equal to this number. If
+ // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed).
AppendPosition *int64
- // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would cause the blob to exceed that
- // limit or if the blob size is already greater than the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP
- // status code 412 - Precondition Failed).
+ // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would
+ // cause the blob to exceed that limit or if the blob size is already greater than
+ // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 -
+ // Precondition Failed).
MaxSize *int64
}
-// BlobAbortCopyFromURLOptions contains the optional parameters for the Blob.AbortCopyFromURL method.
-type BlobAbortCopyFromURLOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobAcquireLeaseOptions contains the optional parameters for the Blob.AcquireLease method.
-type BlobAcquireLeaseOptions struct {
- // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds.
- // A lease duration cannot be changed using renew or change.
- Duration *int32
- // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See
- // Guid Constructor (String) for a list of valid GUID string formats.
- ProposedLeaseID *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobBreakLeaseOptions contains the optional parameters for the Blob.BreakLease method.
-type BlobBreakLeaseOptions struct {
- // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This break period is only used
- // if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the
- // break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration
- // lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately.
- BreakPeriod *int32
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobChangeLeaseOptions contains the optional parameters for the Blob.ChangeLease method.
-type BlobChangeLeaseOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobCopyFromURLOptions contains the optional parameters for the Blob.CopyFromURL method.
-type BlobCopyFromURLOptions struct {
- // Optional. Used to set blob tags in various blob operations.
- BlobTagsString *string
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata
- // from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
- // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
- // rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // Specify the md5 calculated for the range of bytes that must be read from the copy source.
- SourceContentMD5 []byte
- // Optional. Indicates the tier to be set on the blob.
- Tier *AccessTier
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobCreateSnapshotOptions contains the optional parameters for the Blob.CreateSnapshot method.
-type BlobCreateSnapshotOptions struct {
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata
- // from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
- // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
- // rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
+// ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted.
+type ArrowConfiguration struct {
+ // REQUIRED
+ Schema []*ArrowField `xml:"Schema>Field"`
}
-// BlobDeleteOptions contains the optional parameters for the Blob.Delete method.
-type BlobDeleteOptions struct {
- // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob and all of its snapshots. only:
- // Delete only the blob's snapshots and not the blob itself
- DeleteSnapshots *DeleteSnapshotsOptionType
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with
- // blob snapshots, see Creating a Snapshot
- // of a Blob.
- Snapshot *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. It's for service version 2019-10-10
- // and newer.
- VersionID *string
+// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration.
+func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ type alias ArrowConfiguration
+ aux := &struct {
+ *alias
+ Schema *[]*ArrowField `xml:"Schema>Field"`
+ }{
+ alias: (*alias)(&a),
+ }
+ if a.Schema != nil {
+ aux.Schema = &a.Schema
+ }
+ return e.EncodeElement(aux, start)
}
-// BlobDownloadOptions contains the optional parameters for the Blob.Download method.
-type BlobDownloadOptions struct {
- // Return only the bytes of the blob in the specified range.
- Range *string
- // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less than or equal
- // to 4 MB in size.
- RangeGetContentCRC64 *bool
- // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the range is less than or equal to
- // 4 MB in size.
- RangeGetContentMD5 *bool
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with
- // blob snapshots, see Creating a Snapshot
- // of a Blob.
- Snapshot *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. It's for service version 2019-10-10
- // and newer.
- VersionID *string
+// ArrowField - Groups settings regarding specific field of an arrow schema
+type ArrowField struct {
+ // REQUIRED
+ Type *string `xml:"Type"`
+ Name *string `xml:"Name"`
+ Precision *int32 `xml:"Precision"`
+ Scale *int32 `xml:"Scale"`
}
+// BlobFlatListSegment struct
type BlobFlatListSegment struct {
// REQUIRED
BlobItems []*BlobItemInternal `xml:"Blob"`
@@ -269,73 +122,26 @@ func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement)
return e.EncodeElement(aux, start)
}
-// BlobGetAccessControlOptions contains the optional parameters for the Blob.GetAccessControl method.
-type BlobGetAccessControlOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // Optional. Valid only when Hierarchical Namespace is enabled for the account. If "true", the identity values returned in the x-ms-owner, x-ms-group, and
- // x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If "false", the values will be returned
- // as Azure Active Directory Object IDs. The default value is false.
- Upn *bool
-}
-
-// BlobGetAccountInfoOptions contains the optional parameters for the Blob.GetAccountInfo method.
-type BlobGetAccountInfoOptions struct {
- // placeholder for future optional parameters
-}
-
-// BlobGetPropertiesOptions contains the optional parameters for the Blob.GetProperties method.
-type BlobGetPropertiesOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with
- // blob snapshots, see Creating a Snapshot
- // of a Blob.
- Snapshot *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. It's for service version 2019-10-10
- // and newer.
- VersionID *string
-}
-
-// BlobGetTagsOptions contains the optional parameters for the Blob.GetTags method.
-type BlobGetTagsOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with
- // blob snapshots, see Creating a Snapshot
- // of a Blob.
- Snapshot *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. It's for service version 2019-10-10
- // and newer.
- VersionID *string
-}
-
-// BlobHTTPHeaders contains a group of parameters for the Blob.SetHTTPHeaders method.
+// BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method.
type BlobHTTPHeaders struct {
// Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
BlobCacheControl *string
// Optional. Sets the blob's Content-Disposition header.
BlobContentDisposition *string
- // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read request.
+ // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read
+ // request.
BlobContentEncoding *string
- // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read request.
+ // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read
+ // request.
BlobContentLanguage *string
- // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were validated when each was
- // uploaded.
+ // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks
+ // were validated when each was uploaded.
BlobContentMD5 []byte
// Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request.
BlobContentType *string
}
+// BlobHierarchyListSegment struct
type BlobHierarchyListSegment struct {
// REQUIRED
BlobItems []*BlobItemInternal `xml:"Blob"`
@@ -376,13 +182,16 @@ type BlobItemInternal struct {
Snapshot *string `xml:"Snapshot"`
// Blob tags
- BlobTags *BlobTags `xml:"Tags"`
- IsCurrentVersion *bool `xml:"IsCurrentVersion"`
- Metadata map[string]*string `xml:"Metadata"`
+ BlobTags *BlobTags `xml:"Tags"`
+ HasVersionsOnly *bool `xml:"HasVersionsOnly"`
+ IsCurrentVersion *bool `xml:"IsCurrentVersion"`
// Dictionary of
- ObjectReplicationMetadata map[string]*string `xml:"OrMetadata"`
- VersionID *string `xml:"VersionId"`
+ Metadata map[string]*string `xml:"Metadata"`
+
+ // Dictionary of
+ OrMetadata map[string]*string `xml:"OrMetadata"`
+ VersionID *string `xml:"VersionId"`
}
// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItemInternal.
@@ -390,8 +199,8 @@ func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
type alias BlobItemInternal
aux := &struct {
*alias
- Metadata additionalProperties `xml:"Metadata"`
- ObjectReplicationMetadata additionalProperties `xml:"OrMetadata"`
+ Metadata additionalProperties `xml:"Metadata"`
+ OrMetadata additionalProperties `xml:"OrMetadata"`
}{
alias: (*alias)(b),
}
@@ -399,10 +208,11 @@ func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
return err
}
b.Metadata = (map[string]*string)(aux.Metadata)
- b.ObjectReplicationMetadata = (map[string]*string)(aux.ObjectReplicationMetadata)
+ b.OrMetadata = (map[string]*string)(aux.OrMetadata)
return nil
}
+// BlobPrefix struct
type BlobPrefix struct {
// REQUIRED
Name *string `xml:"Name"`
@@ -442,15 +252,20 @@ type BlobPropertiesInternal struct {
DestinationSnapshot *string `xml:"DestinationSnapshot"`
// The name of the encryption scope under which the blob is encrypted.
- EncryptionScope *string `xml:"EncryptionScope"`
- ExpiresOn *time.Time `xml:"Expiry-Time"`
- IncrementalCopy *bool `xml:"IncrementalCopy"`
- IsSealed *bool `xml:"IsSealed"`
- LeaseDuration *LeaseDurationType `xml:"LeaseDuration"`
- LeaseState *LeaseStateType `xml:"LeaseState"`
- LeaseStatus *LeaseStatusType `xml:"LeaseStatus"`
-
- // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High and Standard.
+ EncryptionScope *string `xml:"EncryptionScope"`
+ ExpiresOn *time.Time `xml:"Expiry-Time"`
+ ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"`
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"`
+ IncrementalCopy *bool `xml:"IncrementalCopy"`
+ IsSealed *bool `xml:"Sealed"`
+ LastAccessedOn *time.Time `xml:"LastAccessTime"`
+ LeaseDuration *LeaseDurationType `xml:"LeaseDuration"`
+ LeaseState *LeaseStateType `xml:"LeaseState"`
+ LeaseStatus *LeaseStatusType `xml:"LeaseStatus"`
+ LegalHold *bool `xml:"LegalHold"`
+
+ // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High
+ // and Standard.
RehydratePriority *RehydratePriority `xml:"RehydratePriority"`
RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
ServerEncrypted *bool `xml:"ServerEncrypted"`
@@ -462,21 +277,25 @@ func (b BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElemen
type alias BlobPropertiesInternal
aux := &struct {
*alias
- AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`
- ContentMD5 *[]byte `xml:"Content-MD5"`
- CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"`
- CreationTime *timeRFC1123 `xml:"Creation-Time"`
- DeletedTime *timeRFC1123 `xml:"DeletedTime"`
- ExpiresOn *timeRFC1123 `xml:"Expiry-Time"`
- LastModified *timeRFC1123 `xml:"Last-Modified"`
+ AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`
+ ContentMD5 *[]byte `xml:"Content-MD5"`
+ CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"`
+ CreationTime *timeRFC1123 `xml:"Creation-Time"`
+ DeletedTime *timeRFC1123 `xml:"DeletedTime"`
+ ExpiresOn *timeRFC1123 `xml:"Expiry-Time"`
+ ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
+ LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"`
+ LastModified *timeRFC1123 `xml:"Last-Modified"`
}{
- alias: (*alias)(&b),
- AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime),
- CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime),
- CreationTime: (*timeRFC1123)(b.CreationTime),
- DeletedTime: (*timeRFC1123)(b.DeletedTime),
- ExpiresOn: (*timeRFC1123)(b.ExpiresOn),
- LastModified: (*timeRFC1123)(b.LastModified),
+ alias: (*alias)(&b),
+ AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime),
+ CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime),
+ CreationTime: (*timeRFC1123)(b.CreationTime),
+ DeletedTime: (*timeRFC1123)(b.DeletedTime),
+ ExpiresOn: (*timeRFC1123)(b.ExpiresOn),
+ ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn),
+ LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn),
+ LastModified: (*timeRFC1123)(b.LastModified),
}
if b.ContentMD5 != nil {
aux.ContentMD5 = &b.ContentMD5
@@ -489,13 +308,15 @@ func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartEle
type alias BlobPropertiesInternal
aux := &struct {
*alias
- AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`
- ContentMD5 *[]byte `xml:"Content-MD5"`
- CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"`
- CreationTime *timeRFC1123 `xml:"Creation-Time"`
- DeletedTime *timeRFC1123 `xml:"DeletedTime"`
- ExpiresOn *timeRFC1123 `xml:"Expiry-Time"`
- LastModified *timeRFC1123 `xml:"Last-Modified"`
+ AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`
+ ContentMD5 *[]byte `xml:"Content-MD5"`
+ CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"`
+ CreationTime *timeRFC1123 `xml:"Creation-Time"`
+ DeletedTime *timeRFC1123 `xml:"DeletedTime"`
+ ExpiresOn *timeRFC1123 `xml:"Expiry-Time"`
+ ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
+ LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"`
+ LastModified *timeRFC1123 `xml:"Last-Modified"`
}{
alias: (*alias)(b),
}
@@ -507,178 +328,13 @@ func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartEle
b.CreationTime = (*time.Time)(aux.CreationTime)
b.DeletedTime = (*time.Time)(aux.DeletedTime)
b.ExpiresOn = (*time.Time)(aux.ExpiresOn)
+ b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn)
+ b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn)
b.LastModified = (*time.Time)(aux.LastModified)
return nil
}
-// BlobQueryOptions contains the optional parameters for the Blob.Query method.
-type BlobQueryOptions struct {
- // the query request
- QueryRequest *QueryRequest
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with
- // blob snapshots, see Creating a Snapshot
- // of a Blob.
- Snapshot *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobReleaseLeaseOptions contains the optional parameters for the Blob.ReleaseLease method.
-type BlobReleaseLeaseOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobRenameOptions contains the optional parameters for the Blob.Rename method.
-type BlobRenameOptions struct {
- // Optional. User-defined properties to be stored with the file or directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2,
- // ...", where each value is base64 encoded.
- DirectoryProperties *string
- // Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group,
- // and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal
- // notation (e.g. 0766) are supported.
- PosixPermissions *string
- // Only valid if Hierarchical Namespace is enabled for the account. This umask restricts permission settings for file and directory, and will only be applied
- // when default Acl does not exist in parent directory. If the umask bit has set, it means that the corresponding permission will be disabled. Otherwise
- // the corresponding permission will be determined by the permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified,
- // a default umask - 0027 will be used.
- PosixUmask *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match.
- SourceLeaseID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobRenewLeaseOptions contains the optional parameters for the Blob.RenewLease method.
-type BlobRenewLeaseOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobSetAccessControlOptions contains the optional parameters for the Blob.SetAccessControl method.
-type BlobSetAccessControlOptions struct {
- // Optional. The owning group of the blob or directory.
- Group *string
- // Optional. The owner of the blob or directory.
- Owner *string
- // Sets POSIX access control rights on files and directories. The value is a comma-separated list of access control entries. Each access control entry (ACE)
- // consists of a scope, a type, a user or group identifier, and permissions in the format "[scope:][type]:[id]:[permissions]".
- PosixACL *string
- // Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group,
- // and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal
- // notation (e.g. 0766) are supported.
- PosixPermissions *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobSetExpiryOptions contains the optional parameters for the Blob.SetExpiry method.
-type BlobSetExpiryOptions struct {
- // The time to set the blob to expiry
- ExpiresOn *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobSetHTTPHeadersOptions contains the optional parameters for the Blob.SetHTTPHeaders method.
-type BlobSetHTTPHeadersOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobSetMetadataOptions contains the optional parameters for the Blob.SetMetadata method.
-type BlobSetMetadataOptions struct {
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata
- // from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
- // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
- // rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlobSetTagsOptions contains the optional parameters for the Blob.SetTags method.
-type BlobSetTagsOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // Blob tags
- Tags *BlobTags
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // Specify the transactional crc64 for the body, to be validated by the service.
- TransactionalContentCRC64 []byte
- // Specify the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 []byte
- // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. It's for service version 2019-10-10
- // and newer.
- VersionID *string
-}
-
-// BlobSetTierOptions contains the optional parameters for the Blob.SetTier method.
-type BlobSetTierOptions struct {
- // Optional: Indicates the priority with which to rehydrate an archived blob.
- RehydratePriority *RehydratePriority
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with
- // blob snapshots, see Creating a Snapshot
- // of a Blob.
- Snapshot *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. It's for service version 2019-10-10
- // and newer.
- VersionID *string
-}
-
-// BlobStartCopyFromURLOptions contains the optional parameters for the Blob.StartCopyFromURL method.
-type BlobStartCopyFromURLOptions struct {
- // Optional. Used to set blob tags in various blob operations.
- BlobTagsString *string
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata
- // from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
- // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
- // rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Optional: Indicates the priority with which to rehydrate an archived blob.
- RehydratePriority *RehydratePriority
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
- SealBlob *bool
- // Optional. Indicates the tier to be set on the blob.
- Tier *AccessTier
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
+// BlobTag struct
type BlobTag struct {
// REQUIRED
Key *string `xml:"Key"`
@@ -709,15 +365,6 @@ func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(aux, start)
}
-// BlobUndeleteOptions contains the optional parameters for the Blob.Undelete method.
-type BlobUndeleteOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
// Block - Represents a single block in a block blob. It describes the block's ID and size.
type Block struct {
// REQUIRED; The base64 encoded block ID.
@@ -727,89 +374,7 @@ type Block struct {
Size *int64 `xml:"Size"`
}
-// BlockBlobCommitBlockListOptions contains the optional parameters for the BlockBlob.CommitBlockList method.
-type BlockBlobCommitBlockListOptions struct {
- // Optional. Used to set blob tags in various blob operations.
- BlobTagsString *string
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata
- // from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
- // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
- // rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // Optional. Indicates the tier to be set on the blob.
- Tier *AccessTier
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // Specify the transactional crc64 for the body, to be validated by the service.
- TransactionalContentCRC64 []byte
- // Specify the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 []byte
-}
-
-// BlockBlobGetBlockListOptions contains the optional parameters for the BlockBlob.GetBlockList method.
-type BlockBlobGetBlockListOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with
- // blob snapshots, see Creating a Snapshot
- // of a Blob.
- Snapshot *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlockBlobStageBlockFromURLOptions contains the optional parameters for the BlockBlob.StageBlockFromURL method.
-type BlockBlobStageBlockFromURLOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // Specify the md5 calculated for the range of bytes that must be read from the copy source.
- SourceContentMD5 []byte
- // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
- SourceContentcrc64 []byte
- // Bytes of source data in the specified range.
- SourceRange *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// BlockBlobStageBlockOptions contains the optional parameters for the BlockBlob.StageBlock method.
-type BlockBlobStageBlockOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // Specify the transactional crc64 for the body, to be validated by the service.
- TransactionalContentCRC64 []byte
- // Specify the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 []byte
-}
-
-// BlockBlobUploadOptions contains the optional parameters for the BlockBlob.Upload method.
-type BlockBlobUploadOptions struct {
- // Optional. Used to set blob tags in various blob operations.
- BlobTagsString *string
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata
- // from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
- // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
- // rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // Optional. Indicates the tier to be set on the blob.
- Tier *AccessTier
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // Specify the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 []byte
-}
-
+// BlockList struct
type BlockList struct {
CommittedBlocks []*Block `xml:"CommittedBlocks>Block"`
UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"`
@@ -834,6 +399,7 @@ func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(aux, start)
}
+// BlockLookupList struct
type BlockLookupList struct {
Committed []*string `xml:"Committed"`
Latest []*string `xml:"Latest"`
@@ -864,6 +430,7 @@ func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) erro
return e.EncodeElement(aux, start)
}
+// ClearRange enum
type ClearRange struct {
// REQUIRED
End *int64 `xml:"End"`
@@ -872,104 +439,20 @@ type ClearRange struct {
Start *int64 `xml:"Start"`
}
-// ContainerAcquireLeaseOptions contains the optional parameters for the Container.AcquireLease method.
-type ContainerAcquireLeaseOptions struct {
- // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds.
- // A lease duration cannot be changed using renew or change.
- Duration *int32
- // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See
- // Guid Constructor (String) for a list of valid GUID string formats.
- ProposedLeaseID *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ContainerBreakLeaseOptions contains the optional parameters for the Container.BreakLease method.
-type ContainerBreakLeaseOptions struct {
- // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This break period is only used
- // if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available before the
- // break period has expired, but the lease may be held for longer than the break period. If this header does not appear with a break operation, a fixed-duration
- // lease breaks after the remaining lease period elapses, and an infinite lease breaks immediately.
- BreakPeriod *int32
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ContainerChangeLeaseOptions contains the optional parameters for the Container.ChangeLease method.
-type ContainerChangeLeaseOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ContainerCpkScopeInfo contains a group of parameters for the Container.Create method.
+// ContainerCpkScopeInfo contains a group of parameters for the containerClient.Create method.
type ContainerCpkScopeInfo struct {
- // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all future writes.
+ // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all
+ // future writes.
DefaultEncryptionScope *string
- // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than the scope set on the container.
+ // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than
+ // the scope set on the container.
PreventEncryptionScopeOverride *bool
}
-// ContainerCreateOptions contains the optional parameters for the Container.Create method.
-type ContainerCreateOptions struct {
- // Specifies whether data in the container may be accessed publicly and the level of access
- Access *PublicAccessType
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata
- // from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
- // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
- // rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ContainerDeleteOptions contains the optional parameters for the Container.Delete method.
-type ContainerDeleteOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ContainerGetAccessPolicyOptions contains the optional parameters for the Container.GetAccessPolicy method.
-type ContainerGetAccessPolicyOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ContainerGetAccountInfoOptions contains the optional parameters for the Container.GetAccountInfo method.
-type ContainerGetAccountInfoOptions struct {
- // placeholder for future optional parameters
-}
-
-// ContainerGetPropertiesOptions contains the optional parameters for the Container.GetProperties method.
-type ContainerGetPropertiesOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ContainerItem - An Azure Storage container
-type ContainerItem struct {
- // REQUIRED
- Name *string `xml:"Name"`
+// ContainerItem - An Azure Storage container
+type ContainerItem struct {
+ // REQUIRED
+ Name *string `xml:"Name"`
// REQUIRED; Properties of a container
Properties *ContainerProperties `xml:"Properties"`
@@ -996,67 +479,26 @@ func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) err
return nil
}
-// ContainerListBlobFlatSegmentOptions contains the optional parameters for the Container.ListBlobFlatSegment method.
-type ContainerListBlobFlatSegmentOptions struct {
- // Include this parameter to specify one or more datasets to include in the response.
- Include []ListBlobsIncludeItem
- // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker
- // value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value
- // can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client.
- Marker *string
- // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server
- // will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for
- // retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or
- // than the default of 5000.
- Maxresults *int32
- // Filters the results to return only containers whose name begins with the specified prefix.
- Prefix *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ContainerListBlobHierarchySegmentOptions contains the optional parameters for the Container.ListBlobHierarchySegment method.
-type ContainerListBlobHierarchySegmentOptions struct {
- // Include this parameter to specify one or more datasets to include in the response.
- Include []ListBlobsIncludeItem
- // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker
- // value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value
- // can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client.
- Marker *string
- // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server
- // will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for
- // retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or
- // than the default of 5000.
- Maxresults *int32
- // Filters the results to return only containers whose name begins with the specified prefix.
- Prefix *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
// ContainerProperties - Properties of a container
type ContainerProperties struct {
// REQUIRED
Etag *string `xml:"Etag"`
// REQUIRED
- LastModified *time.Time `xml:"Last-Modified"`
- DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"`
- DeletedTime *time.Time `xml:"DeletedTime"`
- HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"`
- HasLegalHold *bool `xml:"HasLegalHold"`
- LeaseDuration *LeaseDurationType `xml:"LeaseDuration"`
- LeaseState *LeaseStateType `xml:"LeaseState"`
- LeaseStatus *LeaseStatusType `xml:"LeaseStatus"`
- PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"`
- PublicAccess *PublicAccessType `xml:"PublicAccess"`
- RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
+ LastModified *time.Time `xml:"Last-Modified"`
+ DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"`
+ DeletedTime *time.Time `xml:"DeletedTime"`
+ HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"`
+ HasLegalHold *bool `xml:"HasLegalHold"`
+
+ // Indicates if version level worm is enabled on this container.
+ IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"`
+ LeaseDuration *LeaseDurationType `xml:"LeaseDuration"`
+ LeaseState *LeaseStateType `xml:"LeaseState"`
+ LeaseStatus *LeaseStatusType `xml:"LeaseStatus"`
+ PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"`
+ PublicAccess *PublicAccessType `xml:"PublicAccess"`
+ RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
}
// MarshalXML implements the xml.Marshaller interface for type ContainerProperties.
@@ -1092,68 +534,10 @@ func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElemen
return nil
}
-// ContainerReleaseLeaseOptions contains the optional parameters for the Container.ReleaseLease method.
-type ContainerReleaseLeaseOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ContainerRenewLeaseOptions contains the optional parameters for the Container.RenewLease method.
-type ContainerRenewLeaseOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ContainerRestoreOptions contains the optional parameters for the Container.Restore method.
-type ContainerRestoreOptions struct {
- // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore.
- DeletedContainerName *string
- // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore.
- DeletedContainerVersion *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ContainerSetAccessPolicyOptions contains the optional parameters for the Container.SetAccessPolicy method.
-type ContainerSetAccessPolicyOptions struct {
- // Specifies whether data in the container may be accessed publicly and the level of access
- Access *PublicAccessType
- // the acls for the container
- ContainerACL []*SignedIdentifier
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ContainerSetMetadataOptions contains the optional parameters for the Container.SetMetadata method.
-type ContainerSetMetadataOptions struct {
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata
- // from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
- // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
- // rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement
-// a security restriction known as same-origin policy that
-// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another
-// domain
+// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another
+// domain. Web browsers implement a security restriction known as same-origin policy that
+// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin
+// domain) to call APIs in another domain
type CorsRule struct {
// REQUIRED; the request headers that the origin domain may specify on the CORS request.
AllowedHeaders *string `xml:"AllowedHeaders"`
@@ -1161,180 +545,59 @@ type CorsRule struct {
// REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated)
AllowedMethods *string `xml:"AllowedMethods"`
- // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the
- // request originates. Note that the origin must be an exact
- // case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' to allow all origin domains
- // to make requests via CORS.
+ // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain
+ // is the domain from which the request originates. Note that the origin must be an exact
+ // case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*'
+ // to allow all origin domains to make requests via CORS.
AllowedOrigins *string `xml:"AllowedOrigins"`
- // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer
+ // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request
+ // issuer
ExposedHeaders *string `xml:"ExposedHeaders"`
// REQUIRED; The maximum amount time that a browser should cache the preflight OPTIONS request.
MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"`
}
-// CpkInfo contains a group of parameters for the Blob.Download method.
+// CpkInfo contains a group of parameters for the blobClient.Download method.
type CpkInfo struct {
- // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header
- // is provided.
- EncryptionAlgorithm *string
- // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed with the root account
- // encryption key. For more information, see Encryption at Rest for Azure Storage Services.
+ // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+ // if the x-ms-encryption-key header is provided.
+ EncryptionAlgorithm *EncryptionAlgorithmType
+ // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption
+ // is performed with the root account encryption key. For more information, see
+ // Encryption at Rest for Azure Storage Services.
EncryptionKey *string
// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
EncryptionKeySHA256 *string
}
-// CpkScopeInfo contains a group of parameters for the Blob.SetMetadata method.
+// CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
type CpkScopeInfo struct {
- // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified,
- // encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
+ // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided
+ // in the request. If not specified, encryption is performed with the default
+ // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
EncryptionScope *string
}
-// Implements the error and azcore.HTTPResponse interfaces.
-type DataLakeStorageError struct {
- raw string
- // The service error response object.
- DataLakeStorageErrorDetails *DataLakeStorageErrorError `json:"error,omitempty"`
-}
-
-// DataLakeStorageErrorError - The service error response object.
-type DataLakeStorageErrorError struct {
- // The service error code.
- Code *string `json:"Code,omitempty"`
-
- // The service error message.
- Message *string `json:"Message,omitempty"`
-}
-
-// DelimitedTextConfiguration - delimited text configuration
+// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted.
type DelimitedTextConfiguration struct {
- // REQUIRED; column separator
+ // The string used to separate columns.
ColumnSeparator *string `xml:"ColumnSeparator"`
- // REQUIRED; escape char
+ // The string used as an escape character.
EscapeChar *string `xml:"EscapeChar"`
- // REQUIRED; field quote
+ // The string used to quote a specific field.
FieldQuote *string `xml:"FieldQuote"`
- // REQUIRED; has headers
+ // Represents whether the data has headers.
HeadersPresent *bool `xml:"HasHeaders"`
- // REQUIRED; record separator
+ // The string used to separate records.
RecordSeparator *string `xml:"RecordSeparator"`
}
-// DirectoryCreateOptions contains the optional parameters for the Directory.Create method.
-type DirectoryCreateOptions struct {
- // Optional. User-defined properties to be stored with the file or directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2,
- // ...", where each value is base64 encoded.
- DirectoryProperties *string
- // Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group,
- // and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal
- // notation (e.g. 0766) are supported.
- PosixPermissions *string
- // Only valid if Hierarchical Namespace is enabled for the account. This umask restricts permission settings for file and directory, and will only be applied
- // when default Acl does not exist in parent directory. If the umask bit has set, it means that the corresponding permission will be disabled. Otherwise
- // the corresponding permission will be determined by the permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified,
- // a default umask - 0027 will be used.
- PosixUmask *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// DirectoryDeleteOptions contains the optional parameters for the Directory.Delete method.
-type DirectoryDeleteOptions struct {
- // When renaming a directory, the number of paths that are renamed with each invocation is limited. If the number of paths to be renamed exceeds this limit,
- // a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent
- // invocation of the rename operation to continue renaming the directory.
- Marker *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// DirectoryGetAccessControlOptions contains the optional parameters for the Directory.GetAccessControl method.
-type DirectoryGetAccessControlOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // Optional. Valid only when Hierarchical Namespace is enabled for the account. If "true", the identity values returned in the x-ms-owner, x-ms-group, and
- // x-ms-acl response headers will be transformed from Azure Active Directory Object IDs to User Principal Names. If "false", the values will be returned
- // as Azure Active Directory Object IDs. The default value is false.
- Upn *bool
-}
-
-// DirectoryHTTPHeaders contains a group of parameters for the Directory.Create method.
-type DirectoryHTTPHeaders struct {
- // Cache control for given resource
- CacheControl *string
- // Content disposition for given resource
- ContentDisposition *string
- // Content encoding for given resource
- ContentEncoding *string
- // Content language for given resource
- ContentLanguage *string
- // Content type for given resource
- ContentType *string
-}
-
-// DirectoryRenameOptions contains the optional parameters for the Directory.Rename method.
-type DirectoryRenameOptions struct {
- // Optional. User-defined properties to be stored with the file or directory, in the format of a comma-separated list of name and value pairs "n1=v1, n2=v2,
- // ...", where each value is base64 encoded.
- DirectoryProperties *string
- // When renaming a directory, the number of paths that are renamed with each invocation is limited. If the number of paths to be renamed exceeds this limit,
- // a continuation token is returned in this response header. When a continuation token is returned in the response, it must be specified in a subsequent
- // invocation of the rename operation to continue renaming the directory.
- Marker *string
- // Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group,
- // and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal
- // notation (e.g. 0766) are supported.
- PosixPermissions *string
- // Only valid if Hierarchical Namespace is enabled for the account. This umask restricts permission settings for file and directory, and will only be applied
- // when default Acl does not exist in parent directory. If the umask bit has set, it means that the corresponding permission will be disabled. Otherwise
- // the corresponding permission will be determined by the permission. A 4-digit octal notation (e.g. 0022) is supported here. If no umask was specified,
- // a default umask - 0027 will be used.
- PosixUmask *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match.
- SourceLeaseID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// DirectorySetAccessControlOptions contains the optional parameters for the Directory.SetAccessControl method.
-type DirectorySetAccessControlOptions struct {
- // Optional. The owning group of the blob or directory.
- Group *string
- // Optional. The owner of the blob or directory.
- Owner *string
- // Sets POSIX access control rights on files and directories. The value is a comma-separated list of access control entries. Each access control entry (ACE)
- // consists of a scope, a type, a user or group identifier, and permissions in the format "[scope:][type]:[id]:[permissions]".
- PosixACL *string
- // Optional and only valid if Hierarchical Namespace is enabled for the account. Sets POSIX access permissions for the file owner, the file owning group,
- // and others. Each class may be granted read, write, or execute permission. The sticky bit is also supported. Both symbolic (rwxrw-rw-) and 4-digit octal
- // notation (e.g. 0766) are supported.
- PosixPermissions *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
// FilterBlobItem - Blob info from a Filter Blobs API call
type FilterBlobItem struct {
// REQUIRED
@@ -1343,8 +606,8 @@ type FilterBlobItem struct {
// REQUIRED
Name *string `xml:"Name"`
- // REQUIRED
- TagValue *string `xml:"TagValue"`
+ // Blob tags
+ Tags *BlobTags `xml:"Tags"`
}
// FilterBlobSegment - The result of a Filter Blobs API call
@@ -1377,13 +640,13 @@ func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) er
// GeoReplication - Geo-Replication information for the Secondary Storage Service
type GeoReplication struct {
- // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available for read operations at the secondary.
- // Primary writes after this point in time may or may
+ // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available
+ // for read operations at the secondary. Primary writes after this point in time may or may
// not be available for reads.
LastSyncTime *time.Time `xml:"LastSyncTime"`
// REQUIRED; The status of the secondary location
- Status *GeoReplicationStatusType `xml:"Status"`
+ Status *BlobGeoReplicationStatus `xml:"Status"`
}
// MarshalXML implements the xml.Marshaller interface for type GeoReplication.
@@ -1417,7 +680,7 @@ func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) er
// JSONTextConfiguration - json text configuration
type JSONTextConfiguration struct {
- // REQUIRED; record separator
+ // The string used to separate records.
RecordSeparator *string `xml:"RecordSeparator"`
}
@@ -1430,7 +693,7 @@ type KeyInfo struct {
Start *string `xml:"Start"`
}
-// LeaseAccessConditions contains a group of parameters for the Container.GetProperties method.
+// LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
type LeaseAccessConditions struct {
// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
LeaseID *string
@@ -1544,137 +807,10 @@ type ModifiedAccessConditions struct {
IfUnmodifiedSince *time.Time
}
-// PageBlobClearPagesOptions contains the optional parameters for the PageBlob.ClearPages method.
-type PageBlobClearPagesOptions struct {
- // Return only the bytes of the blob in the specified range.
- Range *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// PageBlobCopyIncrementalOptions contains the optional parameters for the PageBlob.CopyIncremental method.
-type PageBlobCopyIncrementalOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// PageBlobCreateOptions contains the optional parameters for the PageBlob.Create method.
-type PageBlobCreateOptions struct {
- // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be
- // between 0 and 2^63 - 1.
- BlobSequenceNumber *int64
- // Optional. Used to set blob tags in various blob operations.
- BlobTagsString *string
- // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the operation will copy the metadata
- // from the source blob or file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified
- // metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming
- // rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information.
- Metadata map[string]string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // Optional. Indicates the tier to be set on the page blob.
- Tier *PremiumPageBlobAccessTier
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// PageBlobGetPageRangesDiffOptions contains the optional parameters for the PageBlob.GetPageRangesDiff method.
-type PageBlobGetPageRangesDiffOptions struct {
- // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot of the target blob. The
- // response will only contain pages that were changed between the target blob and its previous snapshot.
- PrevSnapshotURL *string
- // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response will contain only pages that
- // were changed between target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long
- // as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots are currently supported only for blobs created on
- // or after January 1, 2016.
- Prevsnapshot *string
- // Return only the bytes of the blob in the specified range.
- Range *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with
- // blob snapshots, see Creating a Snapshot
- // of a Blob.
- Snapshot *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// PageBlobGetPageRangesOptions contains the optional parameters for the PageBlob.GetPageRanges method.
-type PageBlobGetPageRangesOptions struct {
- // Return only the bytes of the blob in the specified range.
- Range *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more information on working with
- // blob snapshots, see Creating a Snapshot
- // of a Blob.
- Snapshot *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// PageBlobResizeOptions contains the optional parameters for the PageBlob.Resize method.
-type PageBlobResizeOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// PageBlobUpdateSequenceNumberOptions contains the optional parameters for the PageBlob.UpdateSequenceNumber method.
-type PageBlobUpdateSequenceNumberOptions struct {
- // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of the sequence number must be
- // between 0 and 2^63 - 1.
- BlobSequenceNumber *int64
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// PageBlobUploadPagesFromURLOptions contains the optional parameters for the PageBlob.UploadPagesFromURL method.
-type PageBlobUploadPagesFromURLOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // Specify the md5 calculated for the range of bytes that must be read from the copy source.
- SourceContentMD5 []byte
- // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
- SourceContentcrc64 []byte
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// PageBlobUploadPagesOptions contains the optional parameters for the PageBlob.UploadPages method.
-type PageBlobUploadPagesOptions struct {
- // Return only the bytes of the blob in the specified range.
- Range *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // Specify the transactional crc64 for the body, to be validated by the service.
- TransactionalContentCRC64 []byte
- // Specify the transactional md5 for the body, to be validated by the service.
- TransactionalContentMD5 []byte
-}
-
// PageList - the list of pages
type PageList struct {
ClearRange []*ClearRange `xml:"ClearRange"`
+ NextMarker *string `xml:"NextMarker"`
PageRange []*PageRange `xml:"PageRange"`
}
@@ -1697,6 +833,7 @@ func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(aux, start)
}
+// PageRange struct
type PageRange struct {
// REQUIRED
End *int64 `xml:"End"`
@@ -1705,15 +842,22 @@ type PageRange struct {
Start *int64 `xml:"Start"`
}
+// QueryFormat struct
type QueryFormat struct {
+ // REQUIRED; The quick query format type.
+ Type *QueryFormatType `xml:"Type"`
+
+ // Groups the settings used for formatting the response if the response should be Arrow formatted.
+ ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"`
+
// Groups the settings used for interpreting the blob data if the blob is delimited text formatted.
DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"`
// json text configuration
JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"`
- // The quick query format type.
- Type *QueryFormatType `xml:"Type"`
+ // Anything
+ ParquetTextConfiguration interface{} `xml:"ParquetTextConfiguration"`
}
// QueryRequest - Groups the set of query request settings.
@@ -1739,6 +883,7 @@ func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
return e.EncodeElement(aux, start)
}
+//QuerySerialization struct
type QuerySerialization struct {
// REQUIRED
Format *QueryFormat `xml:"Format"`
@@ -1749,11 +894,15 @@ type RetentionPolicy struct {
// REQUIRED; Indicates whether a retention policy is enabled for the storage service
Enabled *bool `xml:"Enabled"`
- // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this value will be deleted
+ // Indicates whether permanent delete is allowed on this storage account.
+ AllowPermanentDelete *bool `xml:"AllowPermanentDelete"`
+
+ // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this
+ // value will be deleted
Days *int32 `xml:"Days"`
}
-// SequenceNumberAccessConditions contains a group of parameters for the PageBlob.UploadPages method.
+// SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages method.
type SequenceNumberAccessConditions struct {
// Specify this header value to operate only on a blob if it has the specified sequence number.
IfSequenceNumberEqualTo *int64
@@ -1763,98 +912,6 @@ type SequenceNumberAccessConditions struct {
IfSequenceNumberLessThanOrEqualTo *int64
}
-// ServiceFilterBlobsOptions contains the optional parameters for the Service.FilterBlobs method.
-type ServiceFilterBlobsOptions struct {
- // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker
- // value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value
- // can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client.
- Marker *string
- // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server
- // will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for
- // retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or
- // than the default of 5000.
- Maxresults *int32
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
- // Filters the results to return only to return only blobs whose tags match the specified expression.
- Where *string
-}
-
-// ServiceGetAccountInfoOptions contains the optional parameters for the Service.GetAccountInfo method.
-type ServiceGetAccountInfoOptions struct {
- // placeholder for future optional parameters
-}
-
-// ServiceGetPropertiesOptions contains the optional parameters for the Service.GetProperties method.
-type ServiceGetPropertiesOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ServiceGetStatisticsOptions contains the optional parameters for the Service.GetStatistics method.
-type ServiceGetStatisticsOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ServiceGetUserDelegationKeyOptions contains the optional parameters for the Service.GetUserDelegationKey method.
-type ServiceGetUserDelegationKeyOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ServiceListContainersSegmentOptions contains the optional parameters for the Service.ListContainers method.
-type ServiceListContainersSegmentOptions struct {
- // Include this parameter to specify that the container's metadata be returned as part of the response body.
- Include []ListContainersIncludeType
- // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker
- // value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value
- // can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client.
- Marker *string
- // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server
- // will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for
- // retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or
- // than the default of 5000.
- Maxresults *int32
- // Filters the results to return only containers whose name begins with the specified prefix.
- Prefix *string
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ServiceSetPropertiesOptions contains the optional parameters for the Service.SetProperties method.
-type ServiceSetPropertiesOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
-// ServiceSubmitBatchOptions contains the optional parameters for the Service.SubmitBatch method.
-type ServiceSubmitBatchOptions struct {
- // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
- RequestID *string
- // The timeout parameter is expressed in seconds. For more information, see Setting
- // Timeouts for Blob Service Operations.
- Timeout *int32
-}
-
// SignedIdentifier - signed identifier
type SignedIdentifier struct {
// REQUIRED; An Access policy
@@ -1864,7 +921,7 @@ type SignedIdentifier struct {
ID *string `xml:"Id"`
}
-// SourceModifiedAccessConditions contains a group of parameters for the Directory.Rename method.
+// SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL method.
type SourceModifiedAccessConditions struct {
// Specify an ETag value to operate only on blobs with a matching value.
SourceIfMatch *string
@@ -1893,21 +950,13 @@ type StaticWebsite struct {
IndexDocument *string `xml:"IndexDocument"`
}
-//
-//// Implements the error and azcore.HTTPResponse interfaces.
-//type StorageError struct {
-// raw string
-// Message *string `xml:"Message"`
-//}
-//
-
// StorageServiceProperties - Storage Service Properties.
type StorageServiceProperties struct {
// The set of CORS rules.
Cors []*CorsRule `xml:"Cors>CorsRule"`
- // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible values include version 2008-10-27
- // and all more recent versions
+ // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible
+ // values include version 2008-10-27 and all more recent versions
DefaultServiceVersion *string `xml:"DefaultServiceVersion"`
// the retention policy which determines how long the associated data should persist
@@ -2003,3 +1052,1107 @@ func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement)
u.SignedStart = (*time.Time)(aux.SignedStart)
return nil
}
+
+// appendBlobClientAppendBlockFromURLOptions contains the optional parameters for the appendBlobClient.AppendBlockFromURL
+// method.
+type appendBlobClientAppendBlockFromURLOptions struct {
+ // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+ SourceContentMD5 []byte
+ // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+ SourceContentcrc64 []byte
+ // Bytes of source data in the specified range.
+ SourceRange *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// appendBlobClientAppendBlockOptions contains the optional parameters for the appendBlobClient.AppendBlock method.
+type appendBlobClientAppendBlockOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// appendBlobClientCreateOptions contains the optional parameters for the appendBlobClient.Create method.
+type appendBlobClientCreateOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+ // Specifies the date time when the blobs immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specified if a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// appendBlobClientSealOptions contains the optional parameters for the appendBlobClient.Seal method.
+type appendBlobClientSealOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientAbortCopyFromURLOptions contains the optional parameters for the blobClient.AbortCopyFromURL method.
+type blobClientAbortCopyFromURLOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientAcquireLeaseOptions contains the optional parameters for the blobClient.AcquireLease method.
+type blobClientAcquireLeaseOptions struct {
+ // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease
+ // can be between 15 and 60 seconds. A lease duration cannot be changed using
+ // renew or change.
+ Duration *int32
+ // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
+ // not in the correct format. See Guid Constructor (String) for a list of valid GUID
+ // string formats.
+ ProposedLeaseID *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientBreakLeaseOptions contains the optional parameters for the blobClient.BreakLease method.
+type blobClientBreakLeaseOptions struct {
+ // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
+ // break period is only used if it is shorter than the time remaining on the
+ // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
+ // expired, but the lease may be held for longer than the break period. If this
+ // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
+ // and an infinite lease breaks immediately.
+ BreakPeriod *int32
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientChangeLeaseOptions contains the optional parameters for the blobClient.ChangeLease method.
+type blobClientChangeLeaseOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientCopyFromURLOptions contains the optional parameters for the blobClient.CopyFromURL method.
+type blobClientCopyFromURLOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+ // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string
+ // Specifies the date time when the blobs immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specified if a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+ SourceContentMD5 []byte
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientCreateSnapshotOptions contains the optional parameters for the blobClient.CreateSnapshot method.
+type blobClientCreateSnapshotOptions struct {
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the blobClient.DeleteImmutabilityPolicy
+// method.
+type blobClientDeleteImmutabilityPolicyOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientDeleteOptions contains the optional parameters for the blobClient.Delete method.
+type blobClientDeleteOptions struct {
+ // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled.
+ BlobDeleteType *BlobDeleteType
+ // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob
+ // and all of its snapshots. only: Delete only the blob's snapshots and not the blob
+ // itself
+ DeleteSnapshots *DeleteSnapshotsOptionType
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+ // It's for service version 2019-10-10 and newer.
+ VersionID *string
+}
+
+// blobClientDownloadOptions contains the optional parameters for the blobClient.Download method.
+type blobClientDownloadOptions struct {
+ // Return only the bytes of the blob in the specified range.
+ Range *string
+ // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the
+ // range is less than or equal to 4 MB in size.
+ RangeGetContentCRC64 *bool
+ // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
+ // range is less than or equal to 4 MB in size.
+ RangeGetContentMD5 *bool
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+ // It's for service version 2019-10-10 and newer.
+ VersionID *string
+}
+
+// blobClientGetAccountInfoOptions contains the optional parameters for the blobClient.GetAccountInfo method.
+type blobClientGetAccountInfoOptions struct {
+ // placeholder for future optional parameters
+}
+
+// blobClientGetPropertiesOptions contains the optional parameters for the blobClient.GetProperties method.
+type blobClientGetPropertiesOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+ // It's for service version 2019-10-10 and newer.
+ VersionID *string
+}
+
+// blobClientGetTagsOptions contains the optional parameters for the blobClient.GetTags method.
+type blobClientGetTagsOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+ // It's for service version 2019-10-10 and newer.
+ VersionID *string
+}
+
+// blobClientQueryOptions contains the optional parameters for the blobClient.Query method.
+type blobClientQueryOptions struct {
+ // the query request
+ QueryRequest *QueryRequest
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientReleaseLeaseOptions contains the optional parameters for the blobClient.ReleaseLease method.
+type blobClientReleaseLeaseOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientRenewLeaseOptions contains the optional parameters for the blobClient.RenewLease method.
+type blobClientRenewLeaseOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientSetExpiryOptions contains the optional parameters for the blobClient.SetExpiry method.
+type blobClientSetExpiryOptions struct {
+ // The time to set the blob to expiry
+ ExpiresOn *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientSetHTTPHeadersOptions contains the optional parameters for the blobClient.SetHTTPHeaders method.
+type blobClientSetHTTPHeadersOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientSetImmutabilityPolicyOptions contains the optional parameters for the blobClient.SetImmutabilityPolicy method.
+type blobClientSetImmutabilityPolicyOptions struct {
+ // Specifies the date time when the blobs immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientSetLegalHoldOptions contains the optional parameters for the blobClient.SetLegalHold method.
+type blobClientSetLegalHoldOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientSetMetadataOptions contains the optional parameters for the blobClient.SetMetadata method.
+type blobClientSetMetadataOptions struct {
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientSetTagsOptions contains the optional parameters for the blobClient.SetTags method.
+type blobClientSetTagsOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Blob tags
+ Tags *BlobTags
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+ // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+ // It's for service version 2019-10-10 and newer.
+ VersionID *string
+}
+
+// blobClientSetTierOptions contains the optional parameters for the blobClient.SetTier method.
+type blobClientSetTierOptions struct {
+ // Optional: Indicates the priority with which to rehydrate an archived blob.
+ RehydratePriority *RehydratePriority
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+ // It's for service version 2019-10-10 and newer.
+ VersionID *string
+}
+
+// blobClientStartCopyFromURLOptions contains the optional parameters for the blobClient.StartCopyFromURL method.
+type blobClientStartCopyFromURLOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+ // Specifies the date time when the blobs immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specified if a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Optional: Indicates the priority with which to rehydrate an archived blob.
+ RehydratePriority *RehydratePriority
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
+ SealBlob *bool
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blobClientUndeleteOptions contains the optional parameters for the blobClient.Undelete method.
+type blobClientUndeleteOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blockBlobClientCommitBlockListOptions contains the optional parameters for the blockBlobClient.CommitBlockList method.
+type blockBlobClientCommitBlockListOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+ // Specifies the date time when the blobs immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specified if a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// blockBlobClientGetBlockListOptions contains the optional parameters for the blockBlobClient.GetBlockList method.
+type blockBlobClientGetBlockListOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blockBlobClientPutBlobFromURLOptions contains the optional parameters for the blockBlobClient.PutBlobFromURL method.
+type blockBlobClientPutBlobFromURLOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+ // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string
+ // Optional, default is true. Indicates if properties from the source blob should be copied.
+ CopySourceBlobProperties *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+ SourceContentMD5 []byte
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// blockBlobClientStageBlockFromURLOptions contains the optional parameters for the blockBlobClient.StageBlockFromURL method.
+type blockBlobClientStageBlockFromURLOptions struct {
+ // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+ SourceContentMD5 []byte
+ // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+ SourceContentcrc64 []byte
+ // Bytes of source data in the specified range.
+ SourceRange *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// blockBlobClientStageBlockOptions contains the optional parameters for the blockBlobClient.StageBlock method.
+type blockBlobClientStageBlockOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// blockBlobClientUploadOptions contains the optional parameters for the blockBlobClient.Upload method.
+type blockBlobClientUploadOptions struct {
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+ // Specifies the date time when the blobs immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specified if a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Optional. Indicates the tier to be set on the blob.
+ Tier *AccessTier
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// containerClientAcquireLeaseOptions contains the optional parameters for the containerClient.AcquireLease method.
+type containerClientAcquireLeaseOptions struct {
+ // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease
+ // can be between 15 and 60 seconds. A lease duration cannot be changed using
+ // renew or change.
+ Duration *int32
+ // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
+ // not in the correct format. See Guid Constructor (String) for a list of valid GUID
+ // string formats.
+ ProposedLeaseID *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientBreakLeaseOptions contains the optional parameters for the containerClient.BreakLease method.
+type containerClientBreakLeaseOptions struct {
+ // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
+ // break period is only used if it is shorter than the time remaining on the
+ // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
+ // expired, but the lease may be held for longer than the break period. If this
+ // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
+ // and an infinite lease breaks immediately.
+ BreakPeriod *int32
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientChangeLeaseOptions contains the optional parameters for the containerClient.ChangeLease method.
+type containerClientChangeLeaseOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientCreateOptions contains the optional parameters for the containerClient.Create method.
+type containerClientCreateOptions struct {
+ // Specifies whether data in the container may be accessed publicly and the level of access
+ Access *PublicAccessType
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientDeleteOptions contains the optional parameters for the containerClient.Delete method.
+type containerClientDeleteOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientGetAccessPolicyOptions contains the optional parameters for the containerClient.GetAccessPolicy method.
+type containerClientGetAccessPolicyOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientGetAccountInfoOptions contains the optional parameters for the containerClient.GetAccountInfo method.
+type containerClientGetAccountInfoOptions struct {
+ // placeholder for future optional parameters
+}
+
+// containerClientGetPropertiesOptions contains the optional parameters for the containerClient.GetProperties method.
+type containerClientGetPropertiesOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientListBlobFlatSegmentOptions contains the optional parameters for the containerClient.ListBlobFlatSegment
+// method.
+type containerClientListBlobFlatSegmentOptions struct {
+ // Include this parameter to specify one or more datasets to include in the response.
+ Include []ListBlobsIncludeItem
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+ // Filters the results to return only containers whose name begins with the specified prefix.
+ Prefix *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientListBlobHierarchySegmentOptions contains the optional parameters for the containerClient.ListBlobHierarchySegment
+// method.
+type containerClientListBlobHierarchySegmentOptions struct {
+ // Include this parameter to specify one or more datasets to include in the response.
+ Include []ListBlobsIncludeItem
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+ // Filters the results to return only containers whose name begins with the specified prefix.
+ Prefix *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientReleaseLeaseOptions contains the optional parameters for the containerClient.ReleaseLease method.
+type containerClientReleaseLeaseOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientRenameOptions contains the optional parameters for the containerClient.Rename method.
+type containerClientRenameOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match.
+ SourceLeaseID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientRenewLeaseOptions contains the optional parameters for the containerClient.RenewLease method.
+type containerClientRenewLeaseOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientRestoreOptions contains the optional parameters for the containerClient.Restore method.
+type containerClientRestoreOptions struct {
+ // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore.
+ DeletedContainerName *string
+ // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore.
+ DeletedContainerVersion *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientSetAccessPolicyOptions contains the optional parameters for the containerClient.SetAccessPolicy method.
+type containerClientSetAccessPolicyOptions struct {
+ // Specifies whether data in the container may be accessed publicly and the level of access
+ Access *PublicAccessType
+ // the acls for the container
+ ContainerACL []*SignedIdentifier
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientSetMetadataOptions contains the optional parameters for the containerClient.SetMetadata method.
+type containerClientSetMetadataOptions struct {
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// containerClientSubmitBatchOptions contains the optional parameters for the containerClient.SubmitBatch method.
+type containerClientSubmitBatchOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// pageBlobClientClearPagesOptions contains the optional parameters for the pageBlobClient.ClearPages method.
+type pageBlobClientClearPagesOptions struct {
+ // Return only the bytes of the blob in the specified range.
+ Range *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// pageBlobClientCopyIncrementalOptions contains the optional parameters for the pageBlobClient.CopyIncremental method.
+type pageBlobClientCopyIncrementalOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// pageBlobClientCreateOptions contains the optional parameters for the pageBlobClient.Create method.
+type pageBlobClientCreateOptions struct {
+ // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
+ // the sequence number must be between 0 and 2^63 - 1.
+ BlobSequenceNumber *int64
+ // Optional. Used to set blob tags in various blob operations.
+ BlobTagsString *string
+	// Specifies the date time when the blob's immutability policy is set to expire.
+ ImmutabilityPolicyExpiry *time.Time
+ // Specifies the immutability policy mode to set on the blob.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+ // Specified if a legal hold should be set on the blob.
+ LegalHold *bool
+ // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+ // operation will copy the metadata from the source blob or file to the destination
+ // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+ // is not copied from the source blob or file. Note that beginning with
+ // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+ // Blobs, and Metadata for more information.
+ Metadata map[string]string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Optional. Indicates the tier to be set on the page blob.
+ Tier *PremiumPageBlobAccessTier
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// pageBlobClientGetPageRangesDiffOptions contains the optional parameters for the pageBlobClient.GetPageRangesDiff method.
+type pageBlobClientGetPageRangesDiffOptions struct {
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+ // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot
+ // of the target blob. The response will only contain pages that were changed
+ // between the target blob and its previous snapshot.
+ PrevSnapshotURL *string
+ // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response
+ // will contain only pages that were changed between target blob and previous
+ // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot
+ // specified by prevsnapshot is the older of the two. Note that incremental
+ // snapshots are currently supported only for blobs created on or after January 1, 2016.
+ Prevsnapshot *string
+ // Return only the bytes of the blob in the specified range.
+ Range *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// pageBlobClientGetPageRangesOptions contains the optional parameters for the pageBlobClient.GetPageRanges method.
+type pageBlobClientGetPageRangesOptions struct {
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+ // Return only the bytes of the blob in the specified range.
+ Range *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// pageBlobClientResizeOptions contains the optional parameters for the pageBlobClient.Resize method.
+type pageBlobClientResizeOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// pageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the pageBlobClient.UpdateSequenceNumber
+// method.
+type pageBlobClientUpdateSequenceNumberOptions struct {
+ // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
+ // the sequence number must be between 0 and 2^63 - 1.
+ BlobSequenceNumber *int64
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// pageBlobClientUploadPagesFromURLOptions contains the optional parameters for the pageBlobClient.UploadPagesFromURL method.
+type pageBlobClientUploadPagesFromURLOptions struct {
+ // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+ CopySourceAuthorization *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+ SourceContentMD5 []byte
+ // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+ SourceContentcrc64 []byte
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// pageBlobClientUploadPagesOptions contains the optional parameters for the pageBlobClient.UploadPages method.
+type pageBlobClientUploadPagesOptions struct {
+ // Return only the bytes of the blob in the specified range.
+ Range *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+ // Specify the transactional crc64 for the body, to be validated by the service.
+ TransactionalContentCRC64 []byte
+ // Specify the transactional md5 for the body, to be validated by the service.
+ TransactionalContentMD5 []byte
+}
+
+// serviceClientFilterBlobsOptions contains the optional parameters for the serviceClient.FilterBlobs method.
+type serviceClientFilterBlobsOptions struct {
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+	// Filters the results to return only blobs whose tags match the specified expression.
+ Where *string
+}
+
+// serviceClientGetAccountInfoOptions contains the optional parameters for the serviceClient.GetAccountInfo method.
+type serviceClientGetAccountInfoOptions struct {
+ // placeholder for future optional parameters
+}
+
+// serviceClientGetPropertiesOptions contains the optional parameters for the serviceClient.GetProperties method.
+type serviceClientGetPropertiesOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// serviceClientGetStatisticsOptions contains the optional parameters for the serviceClient.GetStatistics method.
+type serviceClientGetStatisticsOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// serviceClientGetUserDelegationKeyOptions contains the optional parameters for the serviceClient.GetUserDelegationKey method.
+type serviceClientGetUserDelegationKeyOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// serviceClientListContainersSegmentOptions contains the optional parameters for the serviceClient.ListContainersSegment
+// method.
+type serviceClientListContainersSegmentOptions struct {
+ // Include this parameter to specify that the container's metadata be returned as part of the response body.
+ Include []ListContainersIncludeType
+ // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+ // operation returns the NextMarker value within the response body if the listing
+ // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+ // as the value for the marker parameter in a subsequent call to request the next
+ // page of list items. The marker value is opaque to the client.
+ Marker *string
+ // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+ // greater than 5000, the server will return up to 5000 items. Note that if the
+ // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+ // of the results. For this reason, it is possible that the service will
+ // return fewer results than specified by maxresults, or than the default of 5000.
+ Maxresults *int32
+ // Filters the results to return only containers whose name begins with the specified prefix.
+ Prefix *string
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// serviceClientSetPropertiesOptions contains the optional parameters for the serviceClient.SetProperties method.
+type serviceClientSetPropertiesOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
+
+// serviceClientSubmitBatchOptions contains the optional parameters for the serviceClient.SubmitBatch method.
+type serviceClientSubmitBatchOptions struct {
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go
index 651403bcb98..bad81201ba2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
@@ -11,52 +11,72 @@ package azblob
import (
"context"
"encoding/base64"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"io"
"net/http"
"strconv"
"time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
type pageBlobClient struct {
- con *connection
+ endpoint string
+ pl runtime.Pipeline
+}
+
+// newPageBlobClient creates a new instance of pageBlobClient with the specified values.
+// endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
+// pl - the pipeline used for sending requests and handling responses.
+func newPageBlobClient(endpoint string, pl runtime.Pipeline) *pageBlobClient {
+ client := &pageBlobClient{
+ endpoint: endpoint,
+ pl: pl,
+ }
+ return client
}
// ClearPages - The Clear Pages operation clears a set of pages from a page blob
-// If the operation fails it returns the *StorageError error type.
-func (client *pageBlobClient) ClearPages(ctx context.Context, contentLength int64, pageBlobClearPagesOptions *PageBlobClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClearPagesResponse, error) {
- req, err := client.clearPagesCreateRequest(ctx, contentLength, pageBlobClearPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// contentLength - The length of the request.
+// pageBlobClientClearPagesOptions - pageBlobClientClearPagesOptions contains the optional parameters for the pageBlobClient.ClearPages
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *pageBlobClient) ClearPages(ctx context.Context, contentLength int64, pageBlobClientClearPagesOptions *pageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientClearPagesResponse, error) {
+ req, err := client.clearPagesCreateRequest(ctx, contentLength, pageBlobClientClearPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
if err != nil {
- return PageBlobClearPagesResponse{}, err
+ return pageBlobClientClearPagesResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return PageBlobClearPagesResponse{}, err
+ return pageBlobClientClearPagesResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return PageBlobClearPagesResponse{}, runtime.NewResponseError(resp)
+ return pageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp)
}
return client.clearPagesHandleResponse(resp)
}
// clearPagesCreateRequest creates the ClearPages request.
-func (client *pageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, pageBlobClearPagesOptions *PageBlobClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *pageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, pageBlobClientClearPagesOptions *pageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "page")
- if pageBlobClearPagesOptions != nil && pageBlobClearPagesOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClearPagesOptions.Timeout), 10))
+ if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientClearPagesOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-page-write", "clear")
req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
- if pageBlobClearPagesOptions != nil && pageBlobClearPagesOptions.Range != nil {
- req.Raw().Header.Set("x-ms-range", *pageBlobClearPagesOptions.Range)
+ if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.Range != nil {
+ req.Raw().Header.Set("x-ms-range", *pageBlobClientClearPagesOptions.Range)
}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
@@ -68,7 +88,7 @@ func (client *pageBlobClient) clearPagesCreateRequest(ctx context.Context, conte
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
@@ -97,45 +117,45 @@ func (client *pageBlobClient) clearPagesCreateRequest(ctx context.Context, conte
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if pageBlobClearPagesOptions != nil && pageBlobClearPagesOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClearPagesOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientClearPagesOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// clearPagesHandleResponse handles the ClearPages response.
-func (client *pageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClearPagesResponse, error) {
- result := PageBlobClearPagesResponse{RawResponse: resp}
+func (client *pageBlobClient) clearPagesHandleResponse(resp *http.Response) (pageBlobClientClearPagesResponse, error) {
+ result := pageBlobClientClearPagesResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobClearPagesResponse{}, err
+ return pageBlobClientClearPagesResponse{}, err
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return PageBlobClearPagesResponse{}, err
+ return pageBlobClientClearPagesResponse{}, err
}
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return PageBlobClearPagesResponse{}, err
+ return pageBlobClientClearPagesResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
}
if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return PageBlobClearPagesResponse{}, err
+ return pageBlobClientClearPagesResponse{}, err
}
result.BlobSequenceNumber = &blobSequenceNumber
}
@@ -151,44 +171,50 @@ func (client *pageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobClearPagesResponse{}, err
+ return pageBlobClientClearPagesResponse{}, err
}
result.Date = &date
}
return result, nil
}
-// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. The snapshot is copied such that
-// only the differential changes between the previously copied
-// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
-// This API is supported since REST version
+// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob.
+// The snapshot is copied such that only the differential changes between the previously copied
+// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can
+// be read or copied from as usual. This API is supported since REST version
// 2016-05-31.
-// If the operation fails it returns the *StorageError error type.
-func (client *pageBlobClient) CopyIncremental(ctx context.Context, copySource string, pageBlobCopyIncrementalOptions *PageBlobCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobCopyIncrementalResponse, error) {
- req, err := client.copyIncrementalCreateRequest(ctx, copySource, pageBlobCopyIncrementalOptions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
+// a page blob snapshot. The value should be URL-encoded as it would appear in a request
+// URI. The source blob must either be public or must be authenticated via a shared access signature.
+// pageBlobClientCopyIncrementalOptions - pageBlobClientCopyIncrementalOptions contains the optional parameters for the pageBlobClient.CopyIncremental
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *pageBlobClient) CopyIncremental(ctx context.Context, copySource string, pageBlobClientCopyIncrementalOptions *pageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientCopyIncrementalResponse, error) {
+ req, err := client.copyIncrementalCreateRequest(ctx, copySource, pageBlobClientCopyIncrementalOptions, modifiedAccessConditions)
if err != nil {
- return PageBlobCopyIncrementalResponse{}, err
+ return pageBlobClientCopyIncrementalResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return PageBlobCopyIncrementalResponse{}, err
+ return pageBlobClientCopyIncrementalResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusAccepted) {
- return PageBlobCopyIncrementalResponse{}, runtime.NewResponseError(resp)
+ return pageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp)
}
return client.copyIncrementalHandleResponse(resp)
}
// copyIncrementalCreateRequest creates the CopyIncremental request.
-func (client *pageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, pageBlobCopyIncrementalOptions *PageBlobCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *pageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, pageBlobClientCopyIncrementalOptions *pageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "incrementalcopy")
- if pageBlobCopyIncrementalOptions != nil && pageBlobCopyIncrementalOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobCopyIncrementalOptions.Timeout), 10))
+ if pageBlobClientCopyIncrementalOptions != nil && pageBlobClientCopyIncrementalOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientCopyIncrementalOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
@@ -207,24 +233,24 @@ func (client *pageBlobClient) copyIncrementalCreateRequest(ctx context.Context,
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
req.Raw().Header.Set("x-ms-copy-source", copySource)
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if pageBlobCopyIncrementalOptions != nil && pageBlobCopyIncrementalOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *pageBlobCopyIncrementalOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if pageBlobClientCopyIncrementalOptions != nil && pageBlobClientCopyIncrementalOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientCopyIncrementalOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// copyIncrementalHandleResponse handles the CopyIncremental response.
-func (client *pageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobCopyIncrementalResponse, error) {
- result := PageBlobCopyIncrementalResponse{RawResponse: resp}
+func (client *pageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (pageBlobClientCopyIncrementalResponse, error) {
+ result := pageBlobClientCopyIncrementalResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobCopyIncrementalResponse{}, err
+ return pageBlobClientCopyIncrementalResponse{}, err
}
result.LastModified = &lastModified
}
@@ -240,7 +266,7 @@ func (client *pageBlobClient) copyIncrementalHandleResponse(resp *http.Response)
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobCopyIncrementalResponse{}, err
+ return pageBlobClientCopyIncrementalResponse{}, err
}
result.Date = &date
}
@@ -254,37 +280,47 @@ func (client *pageBlobClient) copyIncrementalHandleResponse(resp *http.Response)
}
// Create - The Create operation creates a new page blob.
-// If the operation fails it returns the *StorageError error type.
-func (client *pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, pageBlobCreateOptions *PageBlobCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobCreateResponse, error) {
- req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, pageBlobCreateOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// contentLength - The length of the request.
+// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned
+// to a 512-byte boundary.
+// pageBlobClientCreateOptions - pageBlobClientCreateOptions contains the optional parameters for the pageBlobClient.Create
+// method.
+// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, pageBlobClientCreateOptions *pageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientCreateResponse, error) {
+ req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, pageBlobClientCreateOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
if err != nil {
- return PageBlobCreateResponse{}, err
+ return pageBlobClientCreateResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return PageBlobCreateResponse{}, err
+ return pageBlobClientCreateResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return PageBlobCreateResponse{}, runtime.NewResponseError(resp)
+ return pageBlobClientCreateResponse{}, runtime.NewResponseError(resp)
}
return client.createHandleResponse(resp)
}
// createCreateRequest creates the Create request.
-func (client *pageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, pageBlobCreateOptions *PageBlobCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *pageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, pageBlobClientCreateOptions *pageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
- if pageBlobCreateOptions != nil && pageBlobCreateOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobCreateOptions.Timeout), 10))
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientCreateOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-blob-type", "PageBlob")
req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
- if pageBlobCreateOptions != nil && pageBlobCreateOptions.Tier != nil {
- req.Raw().Header.Set("x-ms-access-tier", string(*pageBlobCreateOptions.Tier))
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Tier != nil {
+ req.Raw().Header.Set("x-ms-access-tier", string(*pageBlobClientCreateOptions.Tier))
}
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType)
@@ -301,8 +337,8 @@ func (client *pageBlobClient) createCreateRequest(ctx context.Context, contentLe
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl)
}
- if pageBlobCreateOptions != nil && pageBlobCreateOptions.Metadata != nil {
- for k, v := range pageBlobCreateOptions.Metadata {
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Metadata != nil {
+ for k, v := range pageBlobClientCreateOptions.Metadata {
req.Raw().Header.Set("x-ms-meta-"+k, v)
}
}
@@ -319,7 +355,7 @@ func (client *pageBlobClient) createCreateRequest(ctx context.Context, contentLe
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
@@ -340,37 +376,46 @@ func (client *pageBlobClient) createCreateRequest(ctx context.Context, contentLe
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
req.Raw().Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10))
- if pageBlobCreateOptions != nil && pageBlobCreateOptions.BlobSequenceNumber != nil {
- req.Raw().Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*pageBlobCreateOptions.BlobSequenceNumber, 10))
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.BlobSequenceNumber != nil {
+ req.Raw().Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*pageBlobClientCreateOptions.BlobSequenceNumber, 10))
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if pageBlobCreateOptions != nil && pageBlobCreateOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *pageBlobCreateOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientCreateOptions.RequestID)
}
- if pageBlobCreateOptions != nil && pageBlobCreateOptions.BlobTagsString != nil {
- req.Raw().Header.Set("x-ms-tags", *pageBlobCreateOptions.BlobTagsString)
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.BlobTagsString != nil {
+ req.Raw().Header.Set("x-ms-tags", *pageBlobClientCreateOptions.BlobTagsString)
+ }
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.ImmutabilityPolicyExpiry != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-until-date", pageBlobClientCreateOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123))
+ }
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.ImmutabilityPolicyMode != nil {
+ req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*pageBlobClientCreateOptions.ImmutabilityPolicyMode))
+ }
+ if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.LegalHold != nil {
+ req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*pageBlobClientCreateOptions.LegalHold))
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// createHandleResponse handles the Create response.
-func (client *pageBlobClient) createHandleResponse(resp *http.Response) (PageBlobCreateResponse, error) {
- result := PageBlobCreateResponse{RawResponse: resp}
+func (client *pageBlobClient) createHandleResponse(resp *http.Response) (pageBlobClientCreateResponse, error) {
+ result := pageBlobClientCreateResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobCreateResponse{}, err
+ return pageBlobClientCreateResponse{}, err
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return PageBlobCreateResponse{}, err
+ return pageBlobClientCreateResponse{}, err
}
result.ContentMD5 = contentMD5
}
@@ -389,14 +434,14 @@ func (client *pageBlobClient) createHandleResponse(resp *http.Response) (PageBlo
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobCreateResponse{}, err
+ return pageBlobClientCreateResponse{}, err
}
result.Date = &date
}
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return PageBlobCreateResponse{}, err
+ return pageBlobClientCreateResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
@@ -409,40 +454,48 @@ func (client *pageBlobClient) createHandleResponse(resp *http.Response) (PageBlo
return result, nil
}
-// GetPageRanges - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page blob
-// If the operation fails it returns the *StorageError error type.
-func (client *pageBlobClient) GetPageRanges(ctx context.Context, pageBlobGetPageRangesOptions *PageBlobGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobGetPageRangesResponse, error) {
- req, err := client.getPageRangesCreateRequest(ctx, pageBlobGetPageRangesOptions, leaseAccessConditions, modifiedAccessConditions)
- if err != nil {
- return PageBlobGetPageRangesResponse{}, err
- }
- resp, err := client.con.Pipeline().Do(req)
- if err != nil {
- return PageBlobGetPageRangesResponse{}, err
+// GetPageRanges - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page
+// blob
+// If the operation fails it returns an *azcore.ResponseError type.
+// pageBlobClientGetPageRangesOptions - pageBlobClientGetPageRangesOptions contains the optional parameters for the pageBlobClient.GetPageRanges
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *pageBlobClient) GetPageRanges(pageBlobClientGetPageRangesOptions *pageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *pageBlobClientGetPageRangesPager {
+ return &pageBlobClientGetPageRangesPager{
+ client: client,
+ requester: func(ctx context.Context) (*policy.Request, error) {
+ return client.getPageRangesCreateRequest(ctx, pageBlobClientGetPageRangesOptions, leaseAccessConditions, modifiedAccessConditions)
+ },
+ advancer: func(ctx context.Context, resp pageBlobClientGetPageRangesResponse) (*policy.Request, error) {
+ return runtime.NewRequest(ctx, http.MethodGet, *resp.PageList.NextMarker)
+ },
}
- if !runtime.HasStatusCode(resp, http.StatusOK) {
- return PageBlobGetPageRangesResponse{}, runtime.NewResponseError(resp)
- }
- return client.getPageRangesHandleResponse(resp)
}
// getPageRangesCreateRequest creates the GetPageRanges request.
-func (client *pageBlobClient) getPageRangesCreateRequest(ctx context.Context, pageBlobGetPageRangesOptions *PageBlobGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *pageBlobClient) getPageRangesCreateRequest(ctx context.Context, pageBlobClientGetPageRangesOptions *pageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "pagelist")
- if pageBlobGetPageRangesOptions != nil && pageBlobGetPageRangesOptions.Snapshot != nil {
- reqQP.Set("snapshot", *pageBlobGetPageRangesOptions.Snapshot)
+ if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Snapshot != nil {
+ reqQP.Set("snapshot", *pageBlobClientGetPageRangesOptions.Snapshot)
+ }
+ if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientGetPageRangesOptions.Timeout), 10))
}
- if pageBlobGetPageRangesOptions != nil && pageBlobGetPageRangesOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobGetPageRangesOptions.Timeout), 10))
+ if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Marker != nil {
+ reqQP.Set("marker", *pageBlobClientGetPageRangesOptions.Marker)
+ }
+ if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Maxresults != nil {
+ reqQP.Set("maxresults", strconv.FormatInt(int64(*pageBlobClientGetPageRangesOptions.Maxresults), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if pageBlobGetPageRangesOptions != nil && pageBlobGetPageRangesOptions.Range != nil {
- req.Raw().Header.Set("x-ms-range", *pageBlobGetPageRangesOptions.Range)
+ if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Range != nil {
+ req.Raw().Header.Set("x-ms-range", *pageBlobClientGetPageRangesOptions.Range)
}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
@@ -462,21 +515,21 @@ func (client *pageBlobClient) getPageRangesCreateRequest(ctx context.Context, pa
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if pageBlobGetPageRangesOptions != nil && pageBlobGetPageRangesOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *pageBlobGetPageRangesOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientGetPageRangesOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// getPageRangesHandleResponse handles the GetPageRanges response.
-func (client *pageBlobClient) getPageRangesHandleResponse(resp *http.Response) (PageBlobGetPageRangesResponse, error) {
- result := PageBlobGetPageRangesResponse{RawResponse: resp}
+func (client *pageBlobClient) getPageRangesHandleResponse(resp *http.Response) (pageBlobClientGetPageRangesResponse, error) {
+ result := pageBlobClientGetPageRangesResponse{RawResponse: resp}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobGetPageRangesResponse{}, err
+ return pageBlobClientGetPageRangesResponse{}, err
}
result.LastModified = &lastModified
}
@@ -486,7 +539,7 @@ func (client *pageBlobClient) getPageRangesHandleResponse(resp *http.Response) (
if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
blobContentLength, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return PageBlobGetPageRangesResponse{}, err
+ return pageBlobClientGetPageRangesResponse{}, err
}
result.BlobContentLength = &blobContentLength
}
@@ -502,57 +555,64 @@ func (client *pageBlobClient) getPageRangesHandleResponse(resp *http.Response) (
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobGetPageRangesResponse{}, err
+ return pageBlobClientGetPageRangesResponse{}, err
}
result.Date = &date
}
if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil {
- return PageBlobGetPageRangesResponse{}, err
+ return pageBlobClientGetPageRangesResponse{}, err
}
return result, nil
}
-// GetPageRangesDiff - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were changed between target blob and
-// previous snapshot.
-// If the operation fails it returns the *StorageError error type.
-func (client *pageBlobClient) GetPageRangesDiff(ctx context.Context, pageBlobGetPageRangesDiffOptions *PageBlobGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobGetPageRangesDiffResponse, error) {
- req, err := client.getPageRangesDiffCreateRequest(ctx, pageBlobGetPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions)
- if err != nil {
- return PageBlobGetPageRangesDiffResponse{}, err
- }
- resp, err := client.con.Pipeline().Do(req)
- if err != nil {
- return PageBlobGetPageRangesDiffResponse{}, err
- }
- if !runtime.HasStatusCode(resp, http.StatusOK) {
- return PageBlobGetPageRangesDiffResponse{}, runtime.NewResponseError(resp)
+// GetPageRangesDiff - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were
+// changed between target blob and previous snapshot.
+// If the operation fails it returns an *azcore.ResponseError type.
+// pageBlobClientGetPageRangesDiffOptions - pageBlobClientGetPageRangesDiffOptions contains the optional parameters for the
+// pageBlobClient.GetPageRangesDiff method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *pageBlobClient) GetPageRangesDiff(pageBlobClientGetPageRangesDiffOptions *pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *pageBlobClientGetPageRangesDiffPager {
+ return &pageBlobClientGetPageRangesDiffPager{
+ client: client,
+ requester: func(ctx context.Context) (*policy.Request, error) {
+ return client.getPageRangesDiffCreateRequest(ctx, pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions)
+ },
+ advancer: func(ctx context.Context, resp pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) {
+ return runtime.NewRequest(ctx, http.MethodGet, *resp.PageList.NextMarker)
+ },
}
- return client.getPageRangesDiffHandleResponse(resp)
}
// getPageRangesDiffCreateRequest creates the GetPageRangesDiff request.
-func (client *pageBlobClient) getPageRangesDiffCreateRequest(ctx context.Context, pageBlobGetPageRangesDiffOptions *PageBlobGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *pageBlobClient) getPageRangesDiffCreateRequest(ctx context.Context, pageBlobClientGetPageRangesDiffOptions *pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "pagelist")
- if pageBlobGetPageRangesDiffOptions != nil && pageBlobGetPageRangesDiffOptions.Snapshot != nil {
- reqQP.Set("snapshot", *pageBlobGetPageRangesDiffOptions.Snapshot)
+ if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Snapshot != nil {
+ reqQP.Set("snapshot", *pageBlobClientGetPageRangesDiffOptions.Snapshot)
}
- if pageBlobGetPageRangesDiffOptions != nil && pageBlobGetPageRangesDiffOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobGetPageRangesDiffOptions.Timeout), 10))
+ if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientGetPageRangesDiffOptions.Timeout), 10))
}
- if pageBlobGetPageRangesDiffOptions != nil && pageBlobGetPageRangesDiffOptions.Prevsnapshot != nil {
- reqQP.Set("prevsnapshot", *pageBlobGetPageRangesDiffOptions.Prevsnapshot)
+ if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Prevsnapshot != nil {
+ reqQP.Set("prevsnapshot", *pageBlobClientGetPageRangesDiffOptions.Prevsnapshot)
+ }
+ if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Marker != nil {
+ reqQP.Set("marker", *pageBlobClientGetPageRangesDiffOptions.Marker)
+ }
+ if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Maxresults != nil {
+ reqQP.Set("maxresults", strconv.FormatInt(int64(*pageBlobClientGetPageRangesDiffOptions.Maxresults), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if pageBlobGetPageRangesDiffOptions != nil && pageBlobGetPageRangesDiffOptions.PrevSnapshotURL != nil {
- req.Raw().Header.Set("x-ms-previous-snapshot-url", *pageBlobGetPageRangesDiffOptions.PrevSnapshotURL)
+ if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.PrevSnapshotURL != nil {
+ req.Raw().Header.Set("x-ms-previous-snapshot-url", *pageBlobClientGetPageRangesDiffOptions.PrevSnapshotURL)
}
- if pageBlobGetPageRangesDiffOptions != nil && pageBlobGetPageRangesDiffOptions.Range != nil {
- req.Raw().Header.Set("x-ms-range", *pageBlobGetPageRangesDiffOptions.Range)
+ if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Range != nil {
+ req.Raw().Header.Set("x-ms-range", *pageBlobClientGetPageRangesDiffOptions.Range)
}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
@@ -572,21 +632,21 @@ func (client *pageBlobClient) getPageRangesDiffCreateRequest(ctx context.Context
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if pageBlobGetPageRangesDiffOptions != nil && pageBlobGetPageRangesDiffOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *pageBlobGetPageRangesDiffOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientGetPageRangesDiffOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// getPageRangesDiffHandleResponse handles the GetPageRangesDiff response.
-func (client *pageBlobClient) getPageRangesDiffHandleResponse(resp *http.Response) (PageBlobGetPageRangesDiffResponse, error) {
- result := PageBlobGetPageRangesDiffResponse{RawResponse: resp}
+func (client *pageBlobClient) getPageRangesDiffHandleResponse(resp *http.Response) (pageBlobClientGetPageRangesDiffResponse, error) {
+ result := pageBlobClientGetPageRangesDiffResponse{RawResponse: resp}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobGetPageRangesDiffResponse{}, err
+ return pageBlobClientGetPageRangesDiffResponse{}, err
}
result.LastModified = &lastModified
}
@@ -596,7 +656,7 @@ func (client *pageBlobClient) getPageRangesDiffHandleResponse(resp *http.Respons
if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
blobContentLength, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return PageBlobGetPageRangesDiffResponse{}, err
+ return pageBlobClientGetPageRangesDiffResponse{}, err
}
result.BlobContentLength = &blobContentLength
}
@@ -612,43 +672,51 @@ func (client *pageBlobClient) getPageRangesDiffHandleResponse(resp *http.Respons
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobGetPageRangesDiffResponse{}, err
+ return pageBlobClientGetPageRangesDiffResponse{}, err
}
result.Date = &date
}
if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil {
- return PageBlobGetPageRangesDiffResponse{}, err
+ return pageBlobClientGetPageRangesDiffResponse{}, err
}
return result, nil
}
// Resize - Resize the Blob
-// If the operation fails it returns the *StorageError error type.
-func (client *pageBlobClient) Resize(ctx context.Context, blobContentLength int64, pageBlobResizeOptions *PageBlobResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobResizeResponse, error) {
- req, err := client.resizeCreateRequest(ctx, blobContentLength, pageBlobResizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned
+// to a 512-byte boundary.
+// pageBlobClientResizeOptions - pageBlobClientResizeOptions contains the optional parameters for the pageBlobClient.Resize
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *pageBlobClient) Resize(ctx context.Context, blobContentLength int64, pageBlobClientResizeOptions *pageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientResizeResponse, error) {
+ req, err := client.resizeCreateRequest(ctx, blobContentLength, pageBlobClientResizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
if err != nil {
- return PageBlobResizeResponse{}, err
+ return pageBlobClientResizeResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return PageBlobResizeResponse{}, err
+ return pageBlobClientResizeResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return PageBlobResizeResponse{}, runtime.NewResponseError(resp)
+ return pageBlobClientResizeResponse{}, runtime.NewResponseError(resp)
}
return client.resizeHandleResponse(resp)
}
// resizeCreateRequest creates the Resize request.
-func (client *pageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, pageBlobResizeOptions *PageBlobResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *pageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, pageBlobClientResizeOptions *pageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "properties")
- if pageBlobResizeOptions != nil && pageBlobResizeOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobResizeOptions.Timeout), 10))
+ if pageBlobClientResizeOptions != nil && pageBlobClientResizeOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientResizeOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
@@ -661,7 +729,7 @@ func (client *pageBlobClient) resizeCreateRequest(ctx context.Context, blobConte
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
@@ -682,31 +750,31 @@ func (client *pageBlobClient) resizeCreateRequest(ctx context.Context, blobConte
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
req.Raw().Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10))
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if pageBlobResizeOptions != nil && pageBlobResizeOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *pageBlobResizeOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if pageBlobClientResizeOptions != nil && pageBlobClientResizeOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientResizeOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// resizeHandleResponse handles the Resize response.
-func (client *pageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobResizeResponse, error) {
- result := PageBlobResizeResponse{RawResponse: resp}
+func (client *pageBlobClient) resizeHandleResponse(resp *http.Response) (pageBlobClientResizeResponse, error) {
+ result := pageBlobClientResizeResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobResizeResponse{}, err
+ return pageBlobClientResizeResponse{}, err
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return PageBlobResizeResponse{}, err
+ return pageBlobClientResizeResponse{}, err
}
result.BlobSequenceNumber = &blobSequenceNumber
}
@@ -722,7 +790,7 @@ func (client *pageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobResizeResponse{}, err
+ return pageBlobClientResizeResponse{}, err
}
result.Date = &date
}
@@ -730,32 +798,38 @@ func (client *pageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo
}
// UpdateSequenceNumber - Update the sequence number of the blob
-// If the operation fails it returns the *StorageError error type.
-func (client *pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, pageBlobUpdateSequenceNumberOptions *PageBlobUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobUpdateSequenceNumberResponse, error) {
- req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, pageBlobUpdateSequenceNumberOptions, leaseAccessConditions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to
+// page blobs only. This property indicates how the service should modify the blob's sequence number
+// pageBlobClientUpdateSequenceNumberOptions - pageBlobClientUpdateSequenceNumberOptions contains the optional parameters
+// for the pageBlobClient.UpdateSequenceNumber method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, pageBlobClientUpdateSequenceNumberOptions *pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientUpdateSequenceNumberResponse, error) {
+ req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions, modifiedAccessConditions)
if err != nil {
- return PageBlobUpdateSequenceNumberResponse{}, err
+ return pageBlobClientUpdateSequenceNumberResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return PageBlobUpdateSequenceNumberResponse{}, err
+ return pageBlobClientUpdateSequenceNumberResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return PageBlobUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp)
+ return pageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp)
}
return client.updateSequenceNumberHandleResponse(resp)
}
// updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request.
-func (client *pageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, pageBlobUpdateSequenceNumberOptions *PageBlobUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *pageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, pageBlobClientUpdateSequenceNumberOptions *pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "properties")
- if pageBlobUpdateSequenceNumberOptions != nil && pageBlobUpdateSequenceNumberOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobUpdateSequenceNumberOptions.Timeout), 10))
+ if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUpdateSequenceNumberOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
@@ -777,34 +851,34 @@ func (client *pageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
req.Raw().Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction))
- if pageBlobUpdateSequenceNumberOptions != nil && pageBlobUpdateSequenceNumberOptions.BlobSequenceNumber != nil {
- req.Raw().Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*pageBlobUpdateSequenceNumberOptions.BlobSequenceNumber, 10))
+ if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.BlobSequenceNumber != nil {
+ req.Raw().Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*pageBlobClientUpdateSequenceNumberOptions.BlobSequenceNumber, 10))
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if pageBlobUpdateSequenceNumberOptions != nil && pageBlobUpdateSequenceNumberOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *pageBlobUpdateSequenceNumberOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUpdateSequenceNumberOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response.
-func (client *pageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobUpdateSequenceNumberResponse, error) {
- result := PageBlobUpdateSequenceNumberResponse{RawResponse: resp}
+func (client *pageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (pageBlobClientUpdateSequenceNumberResponse, error) {
+ result := pageBlobClientUpdateSequenceNumberResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobUpdateSequenceNumberResponse{}, err
+ return pageBlobClientUpdateSequenceNumberResponse{}, err
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return PageBlobUpdateSequenceNumberResponse{}, err
+ return pageBlobClientUpdateSequenceNumberResponse{}, err
}
result.BlobSequenceNumber = &blobSequenceNumber
}
@@ -820,7 +894,7 @@ func (client *pageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobUpdateSequenceNumberResponse{}, err
+ return pageBlobClientUpdateSequenceNumberResponse{}, err
}
result.Date = &date
}
@@ -828,44 +902,54 @@ func (client *pageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp
}
// UploadPages - The Upload Pages operation writes a range of pages to a page blob
-// If the operation fails it returns the *StorageError error type.
-func (client *pageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, pageBlobUploadPagesOptions *PageBlobUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobUploadPagesResponse, error) {
- req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, pageBlobUploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
+// If the operation fails it returns an *azcore.ResponseError type.
+// contentLength - The length of the request.
+// body - Initial data
+// pageBlobClientUploadPagesOptions - pageBlobClientUploadPagesOptions contains the optional parameters for the pageBlobClient.UploadPages
+// method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+func (client *pageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, pageBlobClientUploadPagesOptions *pageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientUploadPagesResponse, error) {
+ req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, pageBlobClientUploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
if err != nil {
- return PageBlobUploadPagesResponse{}, err
+ return pageBlobClientUploadPagesResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return PageBlobUploadPagesResponse{}, err
+ return pageBlobClientUploadPagesResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return PageBlobUploadPagesResponse{}, runtime.NewResponseError(resp)
+ return pageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp)
}
return client.uploadPagesHandleResponse(resp)
}
// uploadPagesCreateRequest creates the UploadPages request.
-func (client *pageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, pageBlobUploadPagesOptions *PageBlobUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *pageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, pageBlobClientUploadPagesOptions *pageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "page")
- if pageBlobUploadPagesOptions != nil && pageBlobUploadPagesOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobUploadPagesOptions.Timeout), 10))
+ if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUploadPagesOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-page-write", "update")
req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
- if pageBlobUploadPagesOptions != nil && pageBlobUploadPagesOptions.TransactionalContentMD5 != nil {
- req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(pageBlobUploadPagesOptions.TransactionalContentMD5))
+ if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.TransactionalContentMD5 != nil {
+ req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesOptions.TransactionalContentMD5))
}
- if pageBlobUploadPagesOptions != nil && pageBlobUploadPagesOptions.TransactionalContentCRC64 != nil {
- req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(pageBlobUploadPagesOptions.TransactionalContentCRC64))
+ if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.TransactionalContentCRC64 != nil {
+ req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesOptions.TransactionalContentCRC64))
}
- if pageBlobUploadPagesOptions != nil && pageBlobUploadPagesOptions.Range != nil {
- req.Raw().Header.Set("x-ms-range", *pageBlobUploadPagesOptions.Range)
+ if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.Range != nil {
+ req.Raw().Header.Set("x-ms-range", *pageBlobClientUploadPagesOptions.Range)
}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID)
@@ -877,7 +961,7 @@ func (client *pageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
@@ -906,45 +990,45 @@ func (client *pageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if pageBlobUploadPagesOptions != nil && pageBlobUploadPagesOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *pageBlobUploadPagesOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUploadPagesOptions.RequestID)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, req.SetBody(body, "application/octet-stream")
}
// uploadPagesHandleResponse handles the UploadPages response.
-func (client *pageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobUploadPagesResponse, error) {
- result := PageBlobUploadPagesResponse{RawResponse: resp}
+func (client *pageBlobClient) uploadPagesHandleResponse(resp *http.Response) (pageBlobClientUploadPagesResponse, error) {
+ result := pageBlobClientUploadPagesResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobUploadPagesResponse{}, err
+ return pageBlobClientUploadPagesResponse{}, err
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return PageBlobUploadPagesResponse{}, err
+ return pageBlobClientUploadPagesResponse{}, err
}
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return PageBlobUploadPagesResponse{}, err
+ return pageBlobClientUploadPagesResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
}
if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return PageBlobUploadPagesResponse{}, err
+ return pageBlobClientUploadPagesResponse{}, err
}
result.BlobSequenceNumber = &blobSequenceNumber
}
@@ -960,14 +1044,14 @@ func (client *pageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobUploadPagesResponse{}, err
+ return pageBlobClientUploadPagesResponse{}, err
}
result.Date = &date
}
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return PageBlobUploadPagesResponse{}, err
+ return pageBlobClientUploadPagesResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
@@ -980,43 +1064,60 @@ func (client *pageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa
return result, nil
}
-// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from a URL
-// If the operation fails it returns the *StorageError error type.
-func (client *pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, pageBlobUploadPagesFromURLOptions *PageBlobUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobUploadPagesFromURLResponse, error) {
- req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, pageBlobUploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
+// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from
+// a URL
+// If the operation fails it returns an *azcore.ResponseError type.
+// sourceURL - Specify a URL to the copy source.
+// sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header
+// and x-ms-range/Range destination range header.
+// contentLength - The length of the request.
+// rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end
+// is required.
+// pageBlobClientUploadPagesFromURLOptions - pageBlobClientUploadPagesFromURLOptions contains the optional parameters for
+// the pageBlobClient.UploadPagesFromURL method.
+// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method.
+// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method.
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method.
+// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages
+// method.
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method.
+// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL
+// method.
+func (client *pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, pageBlobClientUploadPagesFromURLOptions *pageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (pageBlobClientUploadPagesFromURLResponse, error) {
+ req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, pageBlobClientUploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
if err != nil {
- return PageBlobUploadPagesFromURLResponse{}, err
+ return pageBlobClientUploadPagesFromURLResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return PageBlobUploadPagesFromURLResponse{}, err
+ return pageBlobClientUploadPagesFromURLResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusCreated) {
- return PageBlobUploadPagesFromURLResponse{}, runtime.NewResponseError(resp)
+ return pageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp)
}
return client.uploadPagesFromURLHandleResponse(resp)
}
// uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request.
-func (client *pageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, pageBlobUploadPagesFromURLOptions *PageBlobUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *pageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, pageBlobClientUploadPagesFromURLOptions *pageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "page")
- if pageBlobUploadPagesFromURLOptions != nil && pageBlobUploadPagesFromURLOptions.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobUploadPagesFromURLOptions.Timeout), 10))
+ if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUploadPagesFromURLOptions.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-page-write", "update")
req.Raw().Header.Set("x-ms-copy-source", sourceURL)
req.Raw().Header.Set("x-ms-source-range", sourceRange)
- if pageBlobUploadPagesFromURLOptions != nil && pageBlobUploadPagesFromURLOptions.SourceContentMD5 != nil {
- req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(pageBlobUploadPagesFromURLOptions.SourceContentMD5))
+ if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.SourceContentMD5 != nil {
+ req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesFromURLOptions.SourceContentMD5))
}
- if pageBlobUploadPagesFromURLOptions != nil && pageBlobUploadPagesFromURLOptions.SourceContentcrc64 != nil {
- req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(pageBlobUploadPagesFromURLOptions.SourceContentcrc64))
+ if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.SourceContentcrc64 != nil {
+ req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesFromURLOptions.SourceContentcrc64))
}
req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
req.Raw().Header.Set("x-ms-range", rangeParam)
@@ -1027,7 +1128,7 @@ func (client *pageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex
req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256)
}
if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header.Set("x-ms-encryption-algorithm", "AES256")
+ req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm))
}
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope)
@@ -1071,45 +1172,48 @@ func (client *pageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch)
}
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
- if pageBlobUploadPagesFromURLOptions != nil && pageBlobUploadPagesFromURLOptions.RequestID != nil {
- req.Raw().Header.Set("x-ms-client-request-id", *pageBlobUploadPagesFromURLOptions.RequestID)
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
+ if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.RequestID != nil {
+ req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUploadPagesFromURLOptions.RequestID)
+ }
+ if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.CopySourceAuthorization != nil {
+ req.Raw().Header.Set("x-ms-copy-source-authorization", *pageBlobClientUploadPagesFromURLOptions.CopySourceAuthorization)
}
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response.
-func (client *pageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobUploadPagesFromURLResponse, error) {
- result := PageBlobUploadPagesFromURLResponse{RawResponse: resp}
+func (client *pageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (pageBlobClientUploadPagesFromURLResponse, error) {
+ result := pageBlobClientUploadPagesFromURLResponse{RawResponse: resp}
if val := resp.Header.Get("ETag"); val != "" {
result.ETag = &val
}
if val := resp.Header.Get("Last-Modified"); val != "" {
lastModified, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobUploadPagesFromURLResponse{}, err
+ return pageBlobClientUploadPagesFromURLResponse{}, err
}
result.LastModified = &lastModified
}
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return PageBlobUploadPagesFromURLResponse{}, err
+ return pageBlobClientUploadPagesFromURLResponse{}, err
}
result.ContentMD5 = contentMD5
}
if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
if err != nil {
- return PageBlobUploadPagesFromURLResponse{}, err
+ return pageBlobClientUploadPagesFromURLResponse{}, err
}
result.XMSContentCRC64 = xMSContentCRC64
}
if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
if err != nil {
- return PageBlobUploadPagesFromURLResponse{}, err
+ return pageBlobClientUploadPagesFromURLResponse{}, err
}
result.BlobSequenceNumber = &blobSequenceNumber
}
@@ -1122,14 +1226,14 @@ func (client *pageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return PageBlobUploadPagesFromURLResponse{}, err
+ return pageBlobClientUploadPagesFromURLResponse{}, err
}
result.Date = &date
}
if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
isServerEncrypted, err := strconv.ParseBool(val)
if err != nil {
- return PageBlobUploadPagesFromURLResponse{}, err
+ return pageBlobClientUploadPagesFromURLResponse{}, err
}
result.IsServerEncrypted = &isServerEncrypted
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go
index d3ad53c2107..9f0cc4629fd 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
@@ -10,30 +10,29 @@ package azblob
import (
"context"
- "net/http"
- "reflect"
-
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "net/http"
+ "reflect"
)
-// ContainerListBlobFlatSegmentPager provides operations for iterating over paged responses.
-type ContainerListBlobFlatSegmentPager struct {
+// containerClientListBlobFlatSegmentPager provides operations for iterating over paged responses.
+type containerClientListBlobFlatSegmentPager struct {
client *containerClient
- current ContainerListBlobFlatSegmentResponse
+ current containerClientListBlobFlatSegmentResponse
err error
requester func(context.Context) (*policy.Request, error)
- advancer func(context.Context, ContainerListBlobFlatSegmentResponse) (*policy.Request, error)
+ advancer func(context.Context, containerClientListBlobFlatSegmentResponse) (*policy.Request, error)
}
// Err returns the last error encountered while paging.
-func (p *ContainerListBlobFlatSegmentPager) Err() error {
+func (p *containerClientListBlobFlatSegmentPager) Err() error {
return p.err
}
// NextPage returns true if the pager advanced to the next page.
// Returns false if there are no more pages or an error occurred.
-func (p *ContainerListBlobFlatSegmentPager) NextPage(ctx context.Context) bool {
+func (p *containerClientListBlobFlatSegmentPager) NextPage(ctx context.Context) bool {
var req *policy.Request
var err error
if !reflect.ValueOf(p.current).IsZero() {
@@ -48,7 +47,7 @@ func (p *ContainerListBlobFlatSegmentPager) NextPage(ctx context.Context) bool {
p.err = err
return false
}
- resp, err := p.client.con.Pipeline().Do(req)
+ resp, err := p.client.pl.Do(req)
if err != nil {
p.err = err
return false
@@ -66,28 +65,28 @@ func (p *ContainerListBlobFlatSegmentPager) NextPage(ctx context.Context) bool {
return true
}
-// PageResponse returns the current ContainerListBlobFlatSegmentResponse page.
-func (p *ContainerListBlobFlatSegmentPager) PageResponse() ContainerListBlobFlatSegmentResponse {
+// PageResponse returns the current containerClientListBlobFlatSegmentResponse page.
+func (p *containerClientListBlobFlatSegmentPager) PageResponse() containerClientListBlobFlatSegmentResponse {
return p.current
}
-// ContainerListBlobHierarchySegmentPager provides operations for iterating over paged responses.
-type ContainerListBlobHierarchySegmentPager struct {
+// containerClientListBlobHierarchySegmentPager provides operations for iterating over paged responses.
+type containerClientListBlobHierarchySegmentPager struct {
client *containerClient
- current ContainerListBlobHierarchySegmentResponse
+ current containerClientListBlobHierarchySegmentResponse
err error
requester func(context.Context) (*policy.Request, error)
- advancer func(context.Context, ContainerListBlobHierarchySegmentResponse) (*policy.Request, error)
+ advancer func(context.Context, containerClientListBlobHierarchySegmentResponse) (*policy.Request, error)
}
// Err returns the last error encountered while paging.
-func (p *ContainerListBlobHierarchySegmentPager) Err() error {
+func (p *containerClientListBlobHierarchySegmentPager) Err() error {
return p.err
}
// NextPage returns true if the pager advanced to the next page.
// Returns false if there are no more pages or an error occurred.
-func (p *ContainerListBlobHierarchySegmentPager) NextPage(ctx context.Context) bool {
+func (p *containerClientListBlobHierarchySegmentPager) NextPage(ctx context.Context) bool {
var req *policy.Request
var err error
if !reflect.ValueOf(p.current).IsZero() {
@@ -102,7 +101,7 @@ func (p *ContainerListBlobHierarchySegmentPager) NextPage(ctx context.Context) b
p.err = err
return false
}
- resp, err := p.client.con.Pipeline().Do(req)
+ resp, err := p.client.pl.Do(req)
if err != nil {
p.err = err
return false
@@ -120,28 +119,136 @@ func (p *ContainerListBlobHierarchySegmentPager) NextPage(ctx context.Context) b
return true
}
-// PageResponse returns the current ContainerListBlobHierarchySegmentResponse page.
-func (p *ContainerListBlobHierarchySegmentPager) PageResponse() ContainerListBlobHierarchySegmentResponse {
+// PageResponse returns the current containerClientListBlobHierarchySegmentResponse page.
+func (p *containerClientListBlobHierarchySegmentPager) PageResponse() containerClientListBlobHierarchySegmentResponse {
+ return p.current
+}
+
+// pageBlobClientGetPageRangesDiffPager provides operations for iterating over paged responses.
+type pageBlobClientGetPageRangesDiffPager struct {
+ client *pageBlobClient
+ current pageBlobClientGetPageRangesDiffResponse
+ err error
+ requester func(context.Context) (*policy.Request, error)
+ advancer func(context.Context, pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error)
+}
+
+// Err returns the last error encountered while paging.
+func (p *pageBlobClientGetPageRangesDiffPager) Err() error {
+ return p.err
+}
+
+// NextPage returns true if the pager advanced to the next page.
+// Returns false if there are no more pages or an error occurred.
+func (p *pageBlobClientGetPageRangesDiffPager) NextPage(ctx context.Context) bool {
+ var req *policy.Request
+ var err error
+ if !reflect.ValueOf(p.current).IsZero() {
+ if p.current.PageList.NextMarker == nil || len(*p.current.PageList.NextMarker) == 0 {
+ return false
+ }
+ req, err = p.advancer(ctx, p.current)
+ } else {
+ req, err = p.requester(ctx)
+ }
+ if err != nil {
+ p.err = err
+ return false
+ }
+ resp, err := p.client.pl.Do(req)
+ if err != nil {
+ p.err = err
+ return false
+ }
+ if !runtime.HasStatusCode(resp, http.StatusOK) {
+ p.err = runtime.NewResponseError(resp)
+ return false
+ }
+ result, err := p.client.getPageRangesDiffHandleResponse(resp)
+ if err != nil {
+ p.err = err
+ return false
+ }
+ p.current = result
+ return true
+}
+
+// PageResponse returns the current pageBlobClientGetPageRangesDiffResponse page.
+func (p *pageBlobClientGetPageRangesDiffPager) PageResponse() pageBlobClientGetPageRangesDiffResponse {
+ return p.current
+}
+
+// pageBlobClientGetPageRangesPager provides operations for iterating over paged responses.
+type pageBlobClientGetPageRangesPager struct {
+ client *pageBlobClient
+ current pageBlobClientGetPageRangesResponse
+ err error
+ requester func(context.Context) (*policy.Request, error)
+ advancer func(context.Context, pageBlobClientGetPageRangesResponse) (*policy.Request, error)
+}
+
+// Err returns the last error encountered while paging.
+func (p *pageBlobClientGetPageRangesPager) Err() error {
+ return p.err
+}
+
+// NextPage returns true if the pager advanced to the next page.
+// Returns false if there are no more pages or an error occurred.
+func (p *pageBlobClientGetPageRangesPager) NextPage(ctx context.Context) bool {
+ var req *policy.Request
+ var err error
+ if !reflect.ValueOf(p.current).IsZero() {
+ if p.current.PageList.NextMarker == nil || len(*p.current.PageList.NextMarker) == 0 {
+ return false
+ }
+ req, err = p.advancer(ctx, p.current)
+ } else {
+ req, err = p.requester(ctx)
+ }
+ if err != nil {
+ p.err = err
+ return false
+ }
+ resp, err := p.client.pl.Do(req)
+ if err != nil {
+ p.err = err
+ return false
+ }
+ if !runtime.HasStatusCode(resp, http.StatusOK) {
+ p.err = runtime.NewResponseError(resp)
+ return false
+ }
+ result, err := p.client.getPageRangesHandleResponse(resp)
+ if err != nil {
+ p.err = err
+ return false
+ }
+ p.current = result
+ return true
+}
+
+// PageResponse returns the current pageBlobClientGetPageRangesResponse page.
+func (p *pageBlobClientGetPageRangesPager) PageResponse() pageBlobClientGetPageRangesResponse {
return p.current
}
-// ServiceListContainersSegmentPager provides operations for iterating over paged responses.
-type ServiceListContainersSegmentPager struct {
+// serviceClientListContainersSegmentPager provides operations for iterating over paged responses.
+type serviceClientListContainersSegmentPager struct {
client *serviceClient
- current ServiceListContainersSegmentResponse
+ current serviceClientListContainersSegmentResponse
err error
requester func(context.Context) (*policy.Request, error)
- advancer func(context.Context, ServiceListContainersSegmentResponse) (*policy.Request, error)
+ advancer func(context.Context, serviceClientListContainersSegmentResponse) (*policy.Request, error)
}
// Err returns the last error encountered while paging.
-func (p *ServiceListContainersSegmentPager) Err() error {
+func (p *serviceClientListContainersSegmentPager) Err() error {
return p.err
}
// NextPage returns true if the pager advanced to the next page.
// Returns false if there are no more pages or an error occurred.
-func (p *ServiceListContainersSegmentPager) NextPage(ctx context.Context) bool {
+func (p *serviceClientListContainersSegmentPager) NextPage(ctx context.Context) bool {
var req *policy.Request
var err error
if !reflect.ValueOf(p.current).IsZero() {
@@ -156,7 +263,7 @@ func (p *ServiceListContainersSegmentPager) NextPage(ctx context.Context) bool {
p.err = err
return false
}
- resp, err := p.client.con.Pipeline().Do(req)
+ resp, err := p.client.pl.Do(req)
if err != nil {
p.err = err
return false
@@ -174,7 +281,7 @@ func (p *ServiceListContainersSegmentPager) NextPage(ctx context.Context) bool {
return true
}
-// PageResponse returns the current ServiceListContainersSegmentResponse page.
-func (p *ServiceListContainersSegmentPager) PageResponse() ServiceListContainersSegmentResponse {
+// PageResponse returns the current serviceClientListContainersSegmentResponse page.
+func (p *serviceClientListContainersSegmentPager) PageResponse() serviceClientListContainersSegmentResponse {
return p.current
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go
index 7729cf8c7a3..60c1c0c34ec 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
@@ -13,15 +13,15 @@ import (
"time"
)
-// AppendBlobAppendBlockFromURLResponse contains the response from method AppendBlob.AppendBlockFromURL.
-type AppendBlobAppendBlockFromURLResponse struct {
- AppendBlobAppendBlockFromURLResult
+// appendBlobClientAppendBlockFromURLResponse contains the response from method appendBlobClient.AppendBlockFromURL.
+type appendBlobClientAppendBlockFromURLResponse struct {
+ appendBlobClientAppendBlockFromURLResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// AppendBlobAppendBlockFromURLResult contains the result from method AppendBlob.AppendBlockFromURL.
-type AppendBlobAppendBlockFromURLResult struct {
+// appendBlobClientAppendBlockFromURLResult contains the result from method appendBlobClient.AppendBlockFromURL.
+type appendBlobClientAppendBlockFromURLResult struct {
// BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response.
BlobAppendOffset *string
@@ -59,15 +59,15 @@ type AppendBlobAppendBlockFromURLResult struct {
XMSContentCRC64 []byte
}
-// AppendBlobAppendBlockResponse contains the response from method AppendBlob.AppendBlock.
-type AppendBlobAppendBlockResponse struct {
- AppendBlobAppendBlockResult
+// appendBlobClientAppendBlockResponse contains the response from method appendBlobClient.AppendBlock.
+type appendBlobClientAppendBlockResponse struct {
+ appendBlobClientAppendBlockResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// AppendBlobAppendBlockResult contains the result from method AppendBlob.AppendBlock.
-type AppendBlobAppendBlockResult struct {
+// appendBlobClientAppendBlockResult contains the result from method appendBlobClient.AppendBlock.
+type appendBlobClientAppendBlockResult struct {
// BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response.
BlobAppendOffset *string
@@ -108,15 +108,15 @@ type AppendBlobAppendBlockResult struct {
XMSContentCRC64 []byte
}
-// AppendBlobCreateResponse contains the response from method AppendBlob.Create.
-type AppendBlobCreateResponse struct {
- AppendBlobCreateResult
+// appendBlobClientCreateResponse contains the response from method appendBlobClient.Create.
+type appendBlobClientCreateResponse struct {
+ appendBlobClientCreateResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// AppendBlobCreateResult contains the result from method AppendBlob.Create.
-type AppendBlobCreateResult struct {
+// appendBlobClientCreateResult contains the result from method appendBlobClient.Create.
+type appendBlobClientCreateResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -151,15 +151,15 @@ type AppendBlobCreateResult struct {
VersionID *string
}
-// AppendBlobSealResponse contains the response from method AppendBlob.Seal.
-type AppendBlobSealResponse struct {
- AppendBlobSealResult
+// appendBlobClientSealResponse contains the response from method appendBlobClient.Seal.
+type appendBlobClientSealResponse struct {
+ appendBlobClientSealResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// AppendBlobSealResult contains the result from method AppendBlob.Seal.
-type AppendBlobSealResult struct {
+// appendBlobClientSealResult contains the result from method appendBlobClient.Seal.
+type appendBlobClientSealResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -182,15 +182,15 @@ type AppendBlobSealResult struct {
Version *string
}
-// BlobAbortCopyFromURLResponse contains the response from method Blob.AbortCopyFromURL.
-type BlobAbortCopyFromURLResponse struct {
- BlobAbortCopyFromURLResult
+// blobClientAbortCopyFromURLResponse contains the response from method blobClient.AbortCopyFromURL.
+type blobClientAbortCopyFromURLResponse struct {
+ blobClientAbortCopyFromURLResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobAbortCopyFromURLResult contains the result from method Blob.AbortCopyFromURL.
-type BlobAbortCopyFromURLResult struct {
+// blobClientAbortCopyFromURLResult contains the result from method blobClient.AbortCopyFromURL.
+type blobClientAbortCopyFromURLResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -204,15 +204,15 @@ type BlobAbortCopyFromURLResult struct {
Version *string
}
-// BlobAcquireLeaseResponse contains the response from method Blob.AcquireLease.
-type BlobAcquireLeaseResponse struct {
- BlobAcquireLeaseResult
+// blobClientAcquireLeaseResponse contains the response from method blobClient.AcquireLease.
+type blobClientAcquireLeaseResponse struct {
+ blobClientAcquireLeaseResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobAcquireLeaseResult contains the result from method Blob.AcquireLease.
-type BlobAcquireLeaseResult struct {
+// blobClientAcquireLeaseResult contains the result from method blobClient.AcquireLease.
+type blobClientAcquireLeaseResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -235,15 +235,15 @@ type BlobAcquireLeaseResult struct {
Version *string
}
-// BlobBreakLeaseResponse contains the response from method Blob.BreakLease.
-type BlobBreakLeaseResponse struct {
- BlobBreakLeaseResult
+// blobClientBreakLeaseResponse contains the response from method blobClient.BreakLease.
+type blobClientBreakLeaseResponse struct {
+ blobClientBreakLeaseResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobBreakLeaseResult contains the result from method Blob.BreakLease.
-type BlobBreakLeaseResult struct {
+// blobClientBreakLeaseResult contains the result from method blobClient.BreakLease.
+type blobClientBreakLeaseResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -266,15 +266,15 @@ type BlobBreakLeaseResult struct {
Version *string
}
-// BlobChangeLeaseResponse contains the response from method Blob.ChangeLease.
-type BlobChangeLeaseResponse struct {
- BlobChangeLeaseResult
+// blobClientChangeLeaseResponse contains the response from method blobClient.ChangeLease.
+type blobClientChangeLeaseResponse struct {
+ blobClientChangeLeaseResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobChangeLeaseResult contains the result from method Blob.ChangeLease.
-type BlobChangeLeaseResult struct {
+// blobClientChangeLeaseResult contains the result from method blobClient.ChangeLease.
+type blobClientChangeLeaseResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -297,15 +297,15 @@ type BlobChangeLeaseResult struct {
Version *string
}
-// BlobCopyFromURLResponse contains the response from method Blob.CopyFromURL.
-type BlobCopyFromURLResponse struct {
- BlobCopyFromURLResult
+// blobClientCopyFromURLResponse contains the response from method blobClient.CopyFromURL.
+type blobClientCopyFromURLResponse struct {
+ blobClientCopyFromURLResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobCopyFromURLResult contains the result from method Blob.CopyFromURL.
-type BlobCopyFromURLResult struct {
+// blobClientCopyFromURLResult contains the result from method blobClient.CopyFromURL.
+type blobClientCopyFromURLResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -340,15 +340,15 @@ type BlobCopyFromURLResult struct {
XMSContentCRC64 []byte
}
-// BlobCreateSnapshotResponse contains the response from method Blob.CreateSnapshot.
-type BlobCreateSnapshotResponse struct {
- BlobCreateSnapshotResult
+// blobClientCreateSnapshotResponse contains the response from method blobClient.CreateSnapshot.
+type blobClientCreateSnapshotResponse struct {
+ blobClientCreateSnapshotResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobCreateSnapshotResult contains the result from method Blob.CreateSnapshot.
-type BlobCreateSnapshotResult struct {
+// blobClientCreateSnapshotResult contains the result from method blobClient.CreateSnapshot.
+type blobClientCreateSnapshotResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -377,15 +377,15 @@ type BlobCreateSnapshotResult struct {
VersionID *string
}
-// BlobDeleteResponse contains the response from method Blob.Delete.
-type BlobDeleteResponse struct {
- BlobDeleteResult
+// blobClientDeleteImmutabilityPolicyResponse contains the response from method blobClient.DeleteImmutabilityPolicy.
+type blobClientDeleteImmutabilityPolicyResponse struct {
+ blobClientDeleteImmutabilityPolicyResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobDeleteResult contains the result from method Blob.Delete.
-type BlobDeleteResult struct {
+// blobClientDeleteImmutabilityPolicyResult contains the result from method blobClient.DeleteImmutabilityPolicy.
+type blobClientDeleteImmutabilityPolicyResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -399,15 +399,37 @@ type BlobDeleteResult struct {
Version *string
}
-// BlobDownloadResponse contains the response from method Blob.Download.
-type BlobDownloadResponse struct {
- BlobDownloadResult
+// blobClientDeleteResponse contains the response from method blobClient.Delete.
+type blobClientDeleteResponse struct {
+ blobClientDeleteResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobDownloadResult contains the result from method Blob.Download.
-type BlobDownloadResult struct {
+// blobClientDeleteResult contains the result from method blobClient.Delete.
+type blobClientDeleteResult struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// blobClientDownloadResponse contains the response from method blobClient.Download.
+type blobClientDownloadResponse struct {
+ blobClientDownloadResult
+ // RawResponse contains the underlying HTTP response.
+ RawResponse *http.Response
+}
+
+// blobClientDownloadResult contains the result from method blobClient.Download.
+type blobClientDownloadResult struct {
// AcceptRanges contains the information returned from the Accept-Ranges header response.
AcceptRanges *string
@@ -483,6 +505,15 @@ type BlobDownloadResult struct {
// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
EncryptionScope *string
+ // ErrorCode contains the information returned from the x-ms-error-code header response.
+ ErrorCode *string
+
+ // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response.
+ ImmutabilityPolicyExpiresOn *time.Time
+
+ // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+
// IsCurrentVersion contains the information returned from the x-ms-is-current-version header response.
IsCurrentVersion *bool
@@ -492,6 +523,9 @@ type BlobDownloadResult struct {
// IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response.
IsServerEncrypted *bool
+ // LastAccessed contains the information returned from the x-ms-last-access-time header response.
+ LastAccessed *time.Time
+
// LastModified contains the information returned from the Last-Modified header response.
LastModified *time.Time
@@ -504,6 +538,9 @@ type BlobDownloadResult struct {
// LeaseStatus contains the information returned from the x-ms-lease-status header response.
LeaseStatus *LeaseStatusType
+ // LegalHold contains the information returned from the x-ms-legal-hold header response.
+ LegalHold *bool
+
// Metadata contains the information returned from the x-ms-meta header response.
Metadata map[string]string
@@ -526,52 +563,15 @@ type BlobDownloadResult struct {
VersionID *string
}
-// BlobGetAccessControlResponse contains the response from method Blob.GetAccessControl.
-type BlobGetAccessControlResponse struct {
- BlobGetAccessControlResult
- // RawResponse contains the underlying HTTP response.
- RawResponse *http.Response
-}
-
-// BlobGetAccessControlResult contains the result from method Blob.GetAccessControl.
-type BlobGetAccessControlResult struct {
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *string
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-
- // XMSACL contains the information returned from the x-ms-acl header response.
- XMSACL *string
-
- // XMSGroup contains the information returned from the x-ms-group header response.
- XMSGroup *string
-
- // XMSOwner contains the information returned from the x-ms-owner header response.
- XMSOwner *string
-
- // XMSPermissions contains the information returned from the x-ms-permissions header response.
- XMSPermissions *string
-}
-
-// BlobGetAccountInfoResponse contains the response from method Blob.GetAccountInfo.
-type BlobGetAccountInfoResponse struct {
- BlobGetAccountInfoResult
+// blobClientGetAccountInfoResponse contains the response from method blobClient.GetAccountInfo.
+type blobClientGetAccountInfoResponse struct {
+ blobClientGetAccountInfoResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobGetAccountInfoResult contains the result from method Blob.GetAccountInfo.
-type BlobGetAccountInfoResult struct {
+// blobClientGetAccountInfoResult contains the result from method blobClient.GetAccountInfo.
+type blobClientGetAccountInfoResult struct {
// AccountKind contains the information returned from the x-ms-account-kind header response.
AccountKind *AccountKind
@@ -591,15 +591,15 @@ type BlobGetAccountInfoResult struct {
Version *string
}
-// BlobGetPropertiesResponse contains the response from method Blob.GetProperties.
-type BlobGetPropertiesResponse struct {
- BlobGetPropertiesResult
+// blobClientGetPropertiesResponse contains the response from method blobClient.GetProperties.
+type blobClientGetPropertiesResponse struct {
+ blobClientGetPropertiesResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobGetPropertiesResult contains the result from method Blob.GetProperties.
-type BlobGetPropertiesResult struct {
+// blobClientGetPropertiesResult contains the result from method blobClient.GetProperties.
+type blobClientGetPropertiesResult struct {
// AcceptRanges contains the information returned from the Accept-Ranges header response.
AcceptRanges *string
@@ -687,6 +687,12 @@ type BlobGetPropertiesResult struct {
// ExpiresOn contains the information returned from the x-ms-expiry-time header response.
ExpiresOn *time.Time
+ // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response.
+ ImmutabilityPolicyExpiresOn *time.Time
+
+ // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
+
// IsCurrentVersion contains the information returned from the x-ms-is-current-version header response.
IsCurrentVersion *bool
@@ -699,6 +705,9 @@ type BlobGetPropertiesResult struct {
// IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response.
IsServerEncrypted *bool
+ // LastAccessed contains the information returned from the x-ms-last-access-time header response.
+ LastAccessed *time.Time
+
// LastModified contains the information returned from the Last-Modified header response.
LastModified *time.Time
@@ -711,6 +720,9 @@ type BlobGetPropertiesResult struct {
// LeaseStatus contains the information returned from the x-ms-lease-status header response.
LeaseStatus *LeaseStatusType
+ // LegalHold contains the information returned from the x-ms-legal-hold header response.
+ LegalHold *bool
+
// Metadata contains the information returned from the x-ms-meta header response.
Metadata map[string]string
@@ -736,15 +748,15 @@ type BlobGetPropertiesResult struct {
VersionID *string
}
-// BlobGetTagsResponse contains the response from method Blob.GetTags.
-type BlobGetTagsResponse struct {
- BlobGetTagsResult
+// blobClientGetTagsResponse contains the response from method blobClient.GetTags.
+type blobClientGetTagsResponse struct {
+ blobClientGetTagsResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobGetTagsResult contains the result from method Blob.GetTags.
-type BlobGetTagsResult struct {
+// blobClientGetTagsResult contains the result from method blobClient.GetTags.
+type blobClientGetTagsResult struct {
BlobTags
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string `xml:"ClientRequestID"`
@@ -759,15 +771,15 @@ type BlobGetTagsResult struct {
Version *string `xml:"Version"`
}
-// BlobQueryResponse contains the response from method Blob.Query.
-type BlobQueryResponse struct {
- BlobQueryResult
+// blobClientQueryResponse contains the response from method blobClient.Query.
+type blobClientQueryResponse struct {
+ blobClientQueryResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobQueryResult contains the result from method Blob.Query.
-type BlobQueryResult struct {
+// blobClientQueryResult contains the result from method blobClient.Query.
+type blobClientQueryResult struct {
// AcceptRanges contains the information returned from the Accept-Ranges header response.
AcceptRanges *string
@@ -868,15 +880,15 @@ type BlobQueryResult struct {
Version *string
}
-// BlobReleaseLeaseResponse contains the response from method Blob.ReleaseLease.
-type BlobReleaseLeaseResponse struct {
- BlobReleaseLeaseResult
+// blobClientReleaseLeaseResponse contains the response from method blobClient.ReleaseLease.
+type blobClientReleaseLeaseResponse struct {
+ blobClientReleaseLeaseResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobReleaseLeaseResult contains the result from method Blob.ReleaseLease.
-type BlobReleaseLeaseResult struct {
+// blobClientReleaseLeaseResult contains the result from method blobClient.ReleaseLease.
+type blobClientReleaseLeaseResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -896,21 +908,18 @@ type BlobReleaseLeaseResult struct {
Version *string
}
-// BlobRenameResponse contains the response from method Blob.Rename.
-type BlobRenameResponse struct {
- BlobRenameResult
+// blobClientRenewLeaseResponse contains the response from method blobClient.RenewLease.
+type blobClientRenewLeaseResponse struct {
+ blobClientRenewLeaseResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobRenameResult contains the result from method Blob.Rename.
-type BlobRenameResult struct {
+// blobClientRenewLeaseResult contains the result from method blobClient.RenewLease.
+type blobClientRenewLeaseResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
- // ContentLength contains the information returned from the Content-Length header response.
- ContentLength *int64
-
// Date contains the information returned from the Date header response.
Date *time.Time
@@ -920,6 +929,9 @@ type BlobRenameResult struct {
// LastModified contains the information returned from the Last-Modified header response.
LastModified *time.Time
+ // LeaseID contains the information returned from the x-ms-lease-id header response.
+ LeaseID *string
+
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string
@@ -927,15 +939,15 @@ type BlobRenameResult struct {
Version *string
}
-// BlobRenewLeaseResponse contains the response from method Blob.RenewLease.
-type BlobRenewLeaseResponse struct {
- BlobRenewLeaseResult
+// blobClientSetExpiryResponse contains the response from method blobClient.SetExpiry.
+type blobClientSetExpiryResponse struct {
+ blobClientSetExpiryResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobRenewLeaseResult contains the result from method Blob.RenewLease.
-type BlobRenewLeaseResult struct {
+// blobClientSetExpiryResult contains the result from method blobClient.SetExpiry.
+type blobClientSetExpiryResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -948,9 +960,6 @@ type BlobRenewLeaseResult struct {
// LastModified contains the information returned from the Last-Modified header response.
LastModified *time.Time
- // LeaseID contains the information returned from the x-ms-lease-id header response.
- LeaseID *string
-
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string
@@ -958,15 +967,21 @@ type BlobRenewLeaseResult struct {
Version *string
}
-// BlobSetAccessControlResponse contains the response from method Blob.SetAccessControl.
-type BlobSetAccessControlResponse struct {
- BlobSetAccessControlResult
+// blobClientSetHTTPHeadersResponse contains the response from method blobClient.SetHTTPHeaders.
+type blobClientSetHTTPHeadersResponse struct {
+ blobClientSetHTTPHeadersResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobSetAccessControlResult contains the result from method Blob.SetAccessControl.
-type BlobSetAccessControlResult struct {
+// blobClientSetHTTPHeadersResult contains the result from method blobClient.SetHTTPHeaders.
+type blobClientSetHTTPHeadersResult struct {
+ // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
+ BlobSequenceNumber *int64
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
// Date contains the information returned from the Date header response.
Date *time.Time
@@ -983,26 +998,26 @@ type BlobSetAccessControlResult struct {
Version *string
}
-// BlobSetExpiryResponse contains the response from method Blob.SetExpiry.
-type BlobSetExpiryResponse struct {
- BlobSetExpiryResult
+// blobClientSetImmutabilityPolicyResponse contains the response from method blobClient.SetImmutabilityPolicy.
+type blobClientSetImmutabilityPolicyResponse struct {
+ blobClientSetImmutabilityPolicyResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobSetExpiryResult contains the result from method Blob.SetExpiry.
-type BlobSetExpiryResult struct {
+// blobClientSetImmutabilityPolicyResult contains the result from method blobClient.SetImmutabilityPolicy.
+type blobClientSetImmutabilityPolicyResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
// Date contains the information returned from the Date header response.
Date *time.Time
- // ETag contains the information returned from the ETag header response.
- ETag *string
+ // ImmutabilityPolicyExpiry contains the information returned from the x-ms-immutability-policy-until-date header response.
+ ImmutabilityPolicyExpiry *time.Time
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
+ // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response.
+ ImmutabilityPolicyMode *BlobImmutabilityPolicyMode
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string
@@ -1011,29 +1026,23 @@ type BlobSetExpiryResult struct {
Version *string
}
-// BlobSetHTTPHeadersResponse contains the response from method Blob.SetHTTPHeaders.
-type BlobSetHTTPHeadersResponse struct {
- BlobSetHTTPHeadersResult
+// blobClientSetLegalHoldResponse contains the response from method blobClient.SetLegalHold.
+type blobClientSetLegalHoldResponse struct {
+ blobClientSetLegalHoldResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobSetHTTPHeadersResult contains the result from method Blob.SetHTTPHeaders.
-type BlobSetHTTPHeadersResult struct {
- // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
- BlobSequenceNumber *int64
-
+// blobClientSetLegalHoldResult contains the result from method blobClient.SetLegalHold.
+type blobClientSetLegalHoldResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
// Date contains the information returned from the Date header response.
Date *time.Time
- // ETag contains the information returned from the ETag header response.
- ETag *string
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
+ // LegalHold contains the information returned from the x-ms-legal-hold header response.
+ LegalHold *bool
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string
@@ -1042,15 +1051,15 @@ type BlobSetHTTPHeadersResult struct {
Version *string
}
-// BlobSetMetadataResponse contains the response from method Blob.SetMetadata.
-type BlobSetMetadataResponse struct {
- BlobSetMetadataResult
+// blobClientSetMetadataResponse contains the response from method blobClient.SetMetadata.
+type blobClientSetMetadataResponse struct {
+ blobClientSetMetadataResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobSetMetadataResult contains the result from method Blob.SetMetadata.
-type BlobSetMetadataResult struct {
+// blobClientSetMetadataResult contains the result from method blobClient.SetMetadata.
+type blobClientSetMetadataResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1082,15 +1091,15 @@ type BlobSetMetadataResult struct {
VersionID *string
}
-// BlobSetTagsResponse contains the response from method Blob.SetTags.
-type BlobSetTagsResponse struct {
- BlobSetTagsResult
+// blobClientSetTagsResponse contains the response from method blobClient.SetTags.
+type blobClientSetTagsResponse struct {
+ blobClientSetTagsResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobSetTagsResult contains the result from method Blob.SetTags.
-type BlobSetTagsResult struct {
+// blobClientSetTagsResult contains the result from method blobClient.SetTags.
+type blobClientSetTagsResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1104,15 +1113,15 @@ type BlobSetTagsResult struct {
Version *string
}
-// BlobSetTierResponse contains the response from method Blob.SetTier.
-type BlobSetTierResponse struct {
- BlobSetTierResult
+// blobClientSetTierResponse contains the response from method blobClient.SetTier.
+type blobClientSetTierResponse struct {
+ blobClientSetTierResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobSetTierResult contains the result from method Blob.SetTier.
-type BlobSetTierResult struct {
+// blobClientSetTierResult contains the result from method blobClient.SetTier.
+type blobClientSetTierResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1123,15 +1132,15 @@ type BlobSetTierResult struct {
Version *string
}
-// BlobStartCopyFromURLResponse contains the response from method Blob.StartCopyFromURL.
-type BlobStartCopyFromURLResponse struct {
- BlobStartCopyFromURLResult
+// blobClientStartCopyFromURLResponse contains the response from method blobClient.StartCopyFromURL.
+type blobClientStartCopyFromURLResponse struct {
+ blobClientStartCopyFromURLResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobStartCopyFromURLResult contains the result from method Blob.StartCopyFromURL.
-type BlobStartCopyFromURLResult struct {
+// blobClientStartCopyFromURLResult contains the result from method blobClient.StartCopyFromURL.
+type blobClientStartCopyFromURLResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1160,15 +1169,15 @@ type BlobStartCopyFromURLResult struct {
VersionID *string
}
-// BlobUndeleteResponse contains the response from method Blob.Undelete.
-type BlobUndeleteResponse struct {
- BlobUndeleteResult
+// blobClientUndeleteResponse contains the response from method blobClient.Undelete.
+type blobClientUndeleteResponse struct {
+ blobClientUndeleteResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlobUndeleteResult contains the result from method Blob.Undelete.
-type BlobUndeleteResult struct {
+// blobClientUndeleteResult contains the result from method blobClient.Undelete.
+type blobClientUndeleteResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1182,15 +1191,15 @@ type BlobUndeleteResult struct {
Version *string
}
-// BlockBlobCommitBlockListResponse contains the response from method BlockBlob.CommitBlockList.
-type BlockBlobCommitBlockListResponse struct {
- BlockBlobCommitBlockListResult
+// blockBlobClientCommitBlockListResponse contains the response from method blockBlobClient.CommitBlockList.
+type blockBlobClientCommitBlockListResponse struct {
+ blockBlobClientCommitBlockListResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlockBlobCommitBlockListResult contains the result from method BlockBlob.CommitBlockList.
-type BlockBlobCommitBlockListResult struct {
+// blockBlobClientCommitBlockListResult contains the result from method blockBlobClient.CommitBlockList.
+type blockBlobClientCommitBlockListResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1228,15 +1237,15 @@ type BlockBlobCommitBlockListResult struct {
XMSContentCRC64 []byte
}
-// BlockBlobGetBlockListResponse contains the response from method BlockBlob.GetBlockList.
-type BlockBlobGetBlockListResponse struct {
- BlockBlobGetBlockListResult
+// blockBlobClientGetBlockListResponse contains the response from method blockBlobClient.GetBlockList.
+type blockBlobClientGetBlockListResponse struct {
+ blockBlobClientGetBlockListResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlockBlobGetBlockListResult contains the result from method BlockBlob.GetBlockList.
-type BlockBlobGetBlockListResult struct {
+// blockBlobClientGetBlockListResult contains the result from method blockBlobClient.GetBlockList.
+type blockBlobClientGetBlockListResult struct {
BlockList
// BlobContentLength contains the information returned from the x-ms-blob-content-length header response.
BlobContentLength *int64 `xml:"BlobContentLength"`
@@ -1263,15 +1272,58 @@ type BlockBlobGetBlockListResult struct {
Version *string `xml:"Version"`
}
-// BlockBlobStageBlockFromURLResponse contains the response from method BlockBlob.StageBlockFromURL.
-type BlockBlobStageBlockFromURLResponse struct {
- BlockBlobStageBlockFromURLResult
+// blockBlobClientPutBlobFromURLResponse contains the response from method blockBlobClient.PutBlobFromURL.
+type blockBlobClientPutBlobFromURLResponse struct {
+ blockBlobClientPutBlobFromURLResult
+ // RawResponse contains the underlying HTTP response.
+ RawResponse *http.Response
+}
+
+// blockBlobClientPutBlobFromURLResult contains the result from method blockBlobClient.PutBlobFromURL.
+type blockBlobClientPutBlobFromURLResult struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // ContentMD5 contains the information returned from the Content-MD5 header response.
+ ContentMD5 []byte
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *string
+
+ // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+ EncryptionKeySHA256 *string
+
+ // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+ EncryptionScope *string
+
+ // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+ IsServerEncrypted *bool
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+
+ // VersionID contains the information returned from the x-ms-version-id header response.
+ VersionID *string
+}
+
+// blockBlobClientStageBlockFromURLResponse contains the response from method blockBlobClient.StageBlockFromURL.
+type blockBlobClientStageBlockFromURLResponse struct {
+ blockBlobClientStageBlockFromURLResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlockBlobStageBlockFromURLResult contains the result from method BlockBlob.StageBlockFromURL.
-type BlockBlobStageBlockFromURLResult struct {
+// blockBlobClientStageBlockFromURLResult contains the result from method blockBlobClient.StageBlockFromURL.
+type blockBlobClientStageBlockFromURLResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1300,15 +1352,15 @@ type BlockBlobStageBlockFromURLResult struct {
XMSContentCRC64 []byte
}
-// BlockBlobStageBlockResponse contains the response from method BlockBlob.StageBlock.
-type BlockBlobStageBlockResponse struct {
- BlockBlobStageBlockResult
+// blockBlobClientStageBlockResponse contains the response from method blockBlobClient.StageBlock.
+type blockBlobClientStageBlockResponse struct {
+ blockBlobClientStageBlockResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlockBlobStageBlockResult contains the result from method BlockBlob.StageBlock.
-type BlockBlobStageBlockResult struct {
+// blockBlobClientStageBlockResult contains the result from method blockBlobClient.StageBlock.
+type blockBlobClientStageBlockResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1337,15 +1389,15 @@ type BlockBlobStageBlockResult struct {
XMSContentCRC64 []byte
}
-// BlockBlobUploadResponse contains the response from method BlockBlob.Upload.
-type BlockBlobUploadResponse struct {
- BlockBlobUploadResult
+// blockBlobClientUploadResponse contains the response from method blockBlobClient.Upload.
+type blockBlobClientUploadResponse struct {
+ blockBlobClientUploadResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// BlockBlobUploadResult contains the result from method BlockBlob.Upload.
-type BlockBlobUploadResult struct {
+// blockBlobClientUploadResult contains the result from method blockBlobClient.Upload.
+type blockBlobClientUploadResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1380,15 +1432,15 @@ type BlockBlobUploadResult struct {
VersionID *string
}
-// ContainerAcquireLeaseResponse contains the response from method Container.AcquireLease.
-type ContainerAcquireLeaseResponse struct {
- ContainerAcquireLeaseResult
+// containerClientAcquireLeaseResponse contains the response from method containerClient.AcquireLease.
+type containerClientAcquireLeaseResponse struct {
+ containerClientAcquireLeaseResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerAcquireLeaseResult contains the result from method Container.AcquireLease.
-type ContainerAcquireLeaseResult struct {
+// containerClientAcquireLeaseResult contains the result from method containerClient.AcquireLease.
+type containerClientAcquireLeaseResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1411,15 +1463,15 @@ type ContainerAcquireLeaseResult struct {
Version *string
}
-// ContainerBreakLeaseResponse contains the response from method Container.BreakLease.
-type ContainerBreakLeaseResponse struct {
- ContainerBreakLeaseResult
+// containerClientBreakLeaseResponse contains the response from method containerClient.BreakLease.
+type containerClientBreakLeaseResponse struct {
+ containerClientBreakLeaseResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerBreakLeaseResult contains the result from method Container.BreakLease.
-type ContainerBreakLeaseResult struct {
+// containerClientBreakLeaseResult contains the result from method containerClient.BreakLease.
+type containerClientBreakLeaseResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1442,15 +1494,15 @@ type ContainerBreakLeaseResult struct {
Version *string
}
-// ContainerChangeLeaseResponse contains the response from method Container.ChangeLease.
-type ContainerChangeLeaseResponse struct {
- ContainerChangeLeaseResult
+// containerClientChangeLeaseResponse contains the response from method containerClient.ChangeLease.
+type containerClientChangeLeaseResponse struct {
+ containerClientChangeLeaseResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerChangeLeaseResult contains the result from method Container.ChangeLease.
-type ContainerChangeLeaseResult struct {
+// containerClientChangeLeaseResult contains the result from method containerClient.ChangeLease.
+type containerClientChangeLeaseResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1473,15 +1525,15 @@ type ContainerChangeLeaseResult struct {
Version *string
}
-// ContainerCreateResponse contains the response from method Container.Create.
-type ContainerCreateResponse struct {
- ContainerCreateResult
+// containerClientCreateResponse contains the response from method containerClient.Create.
+type containerClientCreateResponse struct {
+ containerClientCreateResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerCreateResult contains the result from method Container.Create.
-type ContainerCreateResult struct {
+// containerClientCreateResult contains the result from method containerClient.Create.
+type containerClientCreateResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1501,15 +1553,15 @@ type ContainerCreateResult struct {
Version *string
}
-// ContainerDeleteResponse contains the response from method Container.Delete.
-type ContainerDeleteResponse struct {
- ContainerDeleteResult
+// containerClientDeleteResponse contains the response from method containerClient.Delete.
+type containerClientDeleteResponse struct {
+ containerClientDeleteResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerDeleteResult contains the result from method Container.Delete.
-type ContainerDeleteResult struct {
+// containerClientDeleteResult contains the result from method containerClient.Delete.
+type containerClientDeleteResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1523,15 +1575,15 @@ type ContainerDeleteResult struct {
Version *string
}
-// ContainerGetAccessPolicyResponse contains the response from method Container.GetAccessPolicy.
-type ContainerGetAccessPolicyResponse struct {
- ContainerGetAccessPolicyResult
+// containerClientGetAccessPolicyResponse contains the response from method containerClient.GetAccessPolicy.
+type containerClientGetAccessPolicyResponse struct {
+ containerClientGetAccessPolicyResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerGetAccessPolicyResult contains the result from method Container.GetAccessPolicy.
-type ContainerGetAccessPolicyResult struct {
+// containerClientGetAccessPolicyResult contains the result from method containerClient.GetAccessPolicy.
+type containerClientGetAccessPolicyResult struct {
// BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response.
BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"`
@@ -1557,15 +1609,15 @@ type ContainerGetAccessPolicyResult struct {
Version *string `xml:"Version"`
}
-// ContainerGetAccountInfoResponse contains the response from method Container.GetAccountInfo.
-type ContainerGetAccountInfoResponse struct {
- ContainerGetAccountInfoResult
+// containerClientGetAccountInfoResponse contains the response from method containerClient.GetAccountInfo.
+type containerClientGetAccountInfoResponse struct {
+ containerClientGetAccountInfoResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerGetAccountInfoResult contains the result from method Container.GetAccountInfo.
-type ContainerGetAccountInfoResult struct {
+// containerClientGetAccountInfoResult contains the result from method containerClient.GetAccountInfo.
+type containerClientGetAccountInfoResult struct {
// AccountKind contains the information returned from the x-ms-account-kind header response.
AccountKind *AccountKind
@@ -1585,15 +1637,15 @@ type ContainerGetAccountInfoResult struct {
Version *string
}
-// ContainerGetPropertiesResponse contains the response from method Container.GetProperties.
-type ContainerGetPropertiesResponse struct {
- ContainerGetPropertiesResult
+// containerClientGetPropertiesResponse contains the response from method containerClient.GetProperties.
+type containerClientGetPropertiesResponse struct {
+ containerClientGetPropertiesResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerGetPropertiesResult contains the result from method Container.GetProperties.
-type ContainerGetPropertiesResult struct {
+// containerClientGetPropertiesResult contains the result from method containerClient.GetProperties.
+type containerClientGetPropertiesResult struct {
// BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response.
BlobPublicAccess *PublicAccessType
@@ -1618,6 +1670,10 @@ type ContainerGetPropertiesResult struct {
// HasLegalHold contains the information returned from the x-ms-has-legal-hold header response.
HasLegalHold *bool
+ // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled
+ // header response.
+ IsImmutableStorageWithVersioningEnabled *bool
+
// LastModified contains the information returned from the Last-Modified header response.
LastModified *time.Time
@@ -1640,15 +1696,15 @@ type ContainerGetPropertiesResult struct {
Version *string
}
-// ContainerListBlobFlatSegmentResponse contains the response from method Container.ListBlobFlatSegment.
-type ContainerListBlobFlatSegmentResponse struct {
- ContainerListBlobFlatSegmentResult
+// containerClientListBlobFlatSegmentResponse contains the response from method containerClient.ListBlobFlatSegment.
+type containerClientListBlobFlatSegmentResponse struct {
+ containerClientListBlobFlatSegmentResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerListBlobFlatSegmentResult contains the result from method Container.ListBlobFlatSegment.
-type ContainerListBlobFlatSegmentResult struct {
+// containerClientListBlobFlatSegmentResult contains the result from method containerClient.ListBlobFlatSegment.
+type containerClientListBlobFlatSegmentResult struct {
ListBlobsFlatSegmentResponse
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string `xml:"ClientRequestID"`
@@ -1666,15 +1722,15 @@ type ContainerListBlobFlatSegmentResult struct {
Version *string `xml:"Version"`
}
-// ContainerListBlobHierarchySegmentResponse contains the response from method Container.ListBlobHierarchySegment.
-type ContainerListBlobHierarchySegmentResponse struct {
- ContainerListBlobHierarchySegmentResult
+// containerClientListBlobHierarchySegmentResponse contains the response from method containerClient.ListBlobHierarchySegment.
+type containerClientListBlobHierarchySegmentResponse struct {
+ containerClientListBlobHierarchySegmentResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerListBlobHierarchySegmentResult contains the result from method Container.ListBlobHierarchySegment.
-type ContainerListBlobHierarchySegmentResult struct {
+// containerClientListBlobHierarchySegmentResult contains the result from method containerClient.ListBlobHierarchySegment.
+type containerClientListBlobHierarchySegmentResult struct {
ListBlobsHierarchySegmentResponse
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string `xml:"ClientRequestID"`
@@ -1692,43 +1748,15 @@ type ContainerListBlobHierarchySegmentResult struct {
Version *string `xml:"Version"`
}
-// ContainerReleaseLeaseResponse contains the response from method Container.ReleaseLease.
-type ContainerReleaseLeaseResponse struct {
- ContainerReleaseLeaseResult
- // RawResponse contains the underlying HTTP response.
- RawResponse *http.Response
-}
-
-// ContainerReleaseLeaseResult contains the result from method Container.ReleaseLease.
-type ContainerReleaseLeaseResult struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *string
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerRenewLeaseResponse contains the response from method Container.RenewLease.
-type ContainerRenewLeaseResponse struct {
- ContainerRenewLeaseResult
+// containerClientReleaseLeaseResponse contains the response from method containerClient.ReleaseLease.
+type containerClientReleaseLeaseResponse struct {
+ containerClientReleaseLeaseResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerRenewLeaseResult contains the result from method Container.RenewLease.
-type ContainerRenewLeaseResult struct {
+// containerClientReleaseLeaseResult contains the result from method containerClient.ReleaseLease.
+type containerClientReleaseLeaseResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1741,9 +1769,6 @@ type ContainerRenewLeaseResult struct {
// LastModified contains the information returned from the Last-Modified header response.
LastModified *time.Time
- // LeaseID contains the information returned from the x-ms-lease-id header response.
- LeaseID *string
-
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string
@@ -1751,15 +1776,15 @@ type ContainerRenewLeaseResult struct {
Version *string
}
-// ContainerRestoreResponse contains the response from method Container.Restore.
-type ContainerRestoreResponse struct {
- ContainerRestoreResult
+// containerClientRenameResponse contains the response from method containerClient.Rename.
+type containerClientRenameResponse struct {
+ containerClientRenameResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerRestoreResult contains the result from method Container.Restore.
-type ContainerRestoreResult struct {
+// containerClientRenameResult contains the result from method containerClient.Rename.
+type containerClientRenameResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1773,15 +1798,15 @@ type ContainerRestoreResult struct {
Version *string
}
-// ContainerSetAccessPolicyResponse contains the response from method Container.SetAccessPolicy.
-type ContainerSetAccessPolicyResponse struct {
- ContainerSetAccessPolicyResult
+// containerClientRenewLeaseResponse contains the response from method containerClient.RenewLease.
+type containerClientRenewLeaseResponse struct {
+ containerClientRenewLeaseResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ContainerSetAccessPolicyResult contains the result from method Container.SetAccessPolicy.
-type ContainerSetAccessPolicyResult struct {
+// containerClientRenewLeaseResult contains the result from method containerClient.RenewLease.
+type containerClientRenewLeaseResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -1794,33 +1819,8 @@ type ContainerSetAccessPolicyResult struct {
// LastModified contains the information returned from the Last-Modified header response.
LastModified *time.Time
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerSetMetadataResponse contains the response from method Container.SetMetadata.
-type ContainerSetMetadataResponse struct {
- ContainerSetMetadataResult
- // RawResponse contains the underlying HTTP response.
- RawResponse *http.Response
-}
-
-// ContainerSetMetadataResult contains the result from method Container.SetMetadata.
-type ContainerSetMetadataResult struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *string
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
+ // LeaseID contains the information returned from the x-ms-lease-id header response.
+ LeaseID *string
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string
@@ -1829,30 +1829,21 @@ type ContainerSetMetadataResult struct {
Version *string
}
-// DirectoryCreateResponse contains the response from method Directory.Create.
-type DirectoryCreateResponse struct {
- DirectoryCreateResult
+// containerClientRestoreResponse contains the response from method containerClient.Restore.
+type containerClientRestoreResponse struct {
+ containerClientRestoreResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// DirectoryCreateResult contains the result from method Directory.Create.
-type DirectoryCreateResult struct {
+// containerClientRestoreResult contains the result from method containerClient.Restore.
+type containerClientRestoreResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
- // ContentLength contains the information returned from the Content-Length header response.
- ContentLength *int64
-
// Date contains the information returned from the Date header response.
Date *time.Time
- // ETag contains the information returned from the ETag header response.
- ETag *string
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string
@@ -1860,43 +1851,21 @@ type DirectoryCreateResult struct {
Version *string
}
-// DirectoryDeleteResponse contains the response from method Directory.Delete.
-type DirectoryDeleteResponse struct {
- DirectoryDeleteResult
+// containerClientSetAccessPolicyResponse contains the response from method containerClient.SetAccessPolicy.
+type containerClientSetAccessPolicyResponse struct {
+ containerClientSetAccessPolicyResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// DirectoryDeleteResult contains the result from method Directory.Delete.
-type DirectoryDeleteResult struct {
+// containerClientSetAccessPolicyResult contains the result from method containerClient.SetAccessPolicy.
+type containerClientSetAccessPolicyResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
// Date contains the information returned from the Date header response.
Date *time.Time
- // Marker contains the information returned from the x-ms-continuation header response.
- Marker *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// DirectoryGetAccessControlResponse contains the response from method Directory.GetAccessControl.
-type DirectoryGetAccessControlResponse struct {
- DirectoryGetAccessControlResult
- // RawResponse contains the underlying HTTP response.
- RawResponse *http.Response
-}
-
-// DirectoryGetAccessControlResult contains the result from method Directory.GetAccessControl.
-type DirectoryGetAccessControlResult struct {
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
// ETag contains the information returned from the ETag header response.
ETag *string
@@ -1908,35 +1877,20 @@ type DirectoryGetAccessControlResult struct {
// Version contains the information returned from the x-ms-version header response.
Version *string
-
- // XMSACL contains the information returned from the x-ms-acl header response.
- XMSACL *string
-
- // XMSGroup contains the information returned from the x-ms-group header response.
- XMSGroup *string
-
- // XMSOwner contains the information returned from the x-ms-owner header response.
- XMSOwner *string
-
- // XMSPermissions contains the information returned from the x-ms-permissions header response.
- XMSPermissions *string
}
-// DirectoryRenameResponse contains the response from method Directory.Rename.
-type DirectoryRenameResponse struct {
- DirectoryRenameResult
+// containerClientSetMetadataResponse contains the response from method containerClient.SetMetadata.
+type containerClientSetMetadataResponse struct {
+ containerClientSetMetadataResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// DirectoryRenameResult contains the result from method Directory.Rename.
-type DirectoryRenameResult struct {
+// containerClientSetMetadataResult contains the result from method containerClient.SetMetadata.
+type containerClientSetMetadataResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
- // ContentLength contains the information returned from the Content-Length header response.
- ContentLength *int64
-
// Date contains the information returned from the Date header response.
Date *time.Time
@@ -1946,9 +1900,6 @@ type DirectoryRenameResult struct {
// LastModified contains the information returned from the Last-Modified header response.
LastModified *time.Time
- // Marker contains the information returned from the x-ms-continuation header response.
- Marker *string
-
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string
@@ -1956,23 +1907,17 @@ type DirectoryRenameResult struct {
Version *string
}
-// DirectorySetAccessControlResponse contains the response from method Directory.SetAccessControl.
-type DirectorySetAccessControlResponse struct {
- DirectorySetAccessControlResult
+// containerClientSubmitBatchResponse contains the response from method containerClient.SubmitBatch.
+type containerClientSubmitBatchResponse struct {
+ containerClientSubmitBatchResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// DirectorySetAccessControlResult contains the result from method Directory.SetAccessControl.
-type DirectorySetAccessControlResult struct {
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *string
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
+// containerClientSubmitBatchResult contains the result from method containerClient.SubmitBatch.
+type containerClientSubmitBatchResult struct {
+ // ContentType contains the information returned from the Content-Type header response.
+ ContentType *string
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string
@@ -1981,15 +1926,15 @@ type DirectorySetAccessControlResult struct {
Version *string
}
-// PageBlobClearPagesResponse contains the response from method PageBlob.ClearPages.
-type PageBlobClearPagesResponse struct {
- PageBlobClearPagesResult
+// pageBlobClientClearPagesResponse contains the response from method pageBlobClient.ClearPages.
+type pageBlobClientClearPagesResponse struct {
+ pageBlobClientClearPagesResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// PageBlobClearPagesResult contains the result from method PageBlob.ClearPages.
-type PageBlobClearPagesResult struct {
+// pageBlobClientClearPagesResult contains the result from method pageBlobClient.ClearPages.
+type pageBlobClientClearPagesResult struct {
// BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
BlobSequenceNumber *int64
@@ -2018,15 +1963,15 @@ type PageBlobClearPagesResult struct {
XMSContentCRC64 []byte
}
-// PageBlobCopyIncrementalResponse contains the response from method PageBlob.CopyIncremental.
-type PageBlobCopyIncrementalResponse struct {
- PageBlobCopyIncrementalResult
+// pageBlobClientCopyIncrementalResponse contains the response from method pageBlobClient.CopyIncremental.
+type pageBlobClientCopyIncrementalResponse struct {
+ pageBlobClientCopyIncrementalResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// PageBlobCopyIncrementalResult contains the result from method PageBlob.CopyIncremental.
-type PageBlobCopyIncrementalResult struct {
+// pageBlobClientCopyIncrementalResult contains the result from method pageBlobClient.CopyIncremental.
+type pageBlobClientCopyIncrementalResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -2052,15 +1997,15 @@ type PageBlobCopyIncrementalResult struct {
Version *string
}
-// PageBlobCreateResponse contains the response from method PageBlob.Create.
-type PageBlobCreateResponse struct {
- PageBlobCreateResult
+// pageBlobClientCreateResponse contains the response from method pageBlobClient.Create.
+type pageBlobClientCreateResponse struct {
+ pageBlobClientCreateResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// PageBlobCreateResult contains the result from method PageBlob.Create.
-type PageBlobCreateResult struct {
+// pageBlobClientCreateResult contains the result from method pageBlobClient.Create.
+type pageBlobClientCreateResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -2095,15 +2040,15 @@ type PageBlobCreateResult struct {
VersionID *string
}
-// PageBlobGetPageRangesDiffResponse contains the response from method PageBlob.GetPageRangesDiff.
-type PageBlobGetPageRangesDiffResponse struct {
- PageBlobGetPageRangesDiffResult
+// pageBlobClientGetPageRangesDiffResponse contains the response from method pageBlobClient.GetPageRangesDiff.
+type pageBlobClientGetPageRangesDiffResponse struct {
+ pageBlobClientGetPageRangesDiffResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// PageBlobGetPageRangesDiffResult contains the result from method PageBlob.GetPageRangesDiff.
-type PageBlobGetPageRangesDiffResult struct {
+// pageBlobClientGetPageRangesDiffResult contains the result from method pageBlobClient.GetPageRangesDiff.
+type pageBlobClientGetPageRangesDiffResult struct {
PageList
// BlobContentLength contains the information returned from the x-ms-blob-content-length header response.
BlobContentLength *int64 `xml:"BlobContentLength"`
@@ -2127,15 +2072,15 @@ type PageBlobGetPageRangesDiffResult struct {
Version *string `xml:"Version"`
}
-// PageBlobGetPageRangesResponse contains the response from method PageBlob.GetPageRanges.
-type PageBlobGetPageRangesResponse struct {
- PageBlobGetPageRangesResult
+// pageBlobClientGetPageRangesResponse contains the response from method pageBlobClient.GetPageRanges.
+type pageBlobClientGetPageRangesResponse struct {
+ pageBlobClientGetPageRangesResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// PageBlobGetPageRangesResult contains the result from method PageBlob.GetPageRanges.
-type PageBlobGetPageRangesResult struct {
+// pageBlobClientGetPageRangesResult contains the result from method pageBlobClient.GetPageRanges.
+type pageBlobClientGetPageRangesResult struct {
PageList
// BlobContentLength contains the information returned from the x-ms-blob-content-length header response.
BlobContentLength *int64 `xml:"BlobContentLength"`
@@ -2159,15 +2104,15 @@ type PageBlobGetPageRangesResult struct {
Version *string `xml:"Version"`
}
-// PageBlobResizeResponse contains the response from method PageBlob.Resize.
-type PageBlobResizeResponse struct {
- PageBlobResizeResult
+// pageBlobClientResizeResponse contains the response from method pageBlobClient.Resize.
+type pageBlobClientResizeResponse struct {
+ pageBlobClientResizeResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// PageBlobResizeResult contains the result from method PageBlob.Resize.
-type PageBlobResizeResult struct {
+// pageBlobClientResizeResult contains the result from method pageBlobClient.Resize.
+type pageBlobClientResizeResult struct {
// BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
BlobSequenceNumber *int64
@@ -2190,15 +2135,15 @@ type PageBlobResizeResult struct {
Version *string
}
-// PageBlobUpdateSequenceNumberResponse contains the response from method PageBlob.UpdateSequenceNumber.
-type PageBlobUpdateSequenceNumberResponse struct {
- PageBlobUpdateSequenceNumberResult
+// pageBlobClientUpdateSequenceNumberResponse contains the response from method pageBlobClient.UpdateSequenceNumber.
+type pageBlobClientUpdateSequenceNumberResponse struct {
+ pageBlobClientUpdateSequenceNumberResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// PageBlobUpdateSequenceNumberResult contains the result from method PageBlob.UpdateSequenceNumber.
-type PageBlobUpdateSequenceNumberResult struct {
+// pageBlobClientUpdateSequenceNumberResult contains the result from method pageBlobClient.UpdateSequenceNumber.
+type pageBlobClientUpdateSequenceNumberResult struct {
// BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
BlobSequenceNumber *int64
@@ -2221,15 +2166,15 @@ type PageBlobUpdateSequenceNumberResult struct {
Version *string
}
-// PageBlobUploadPagesFromURLResponse contains the response from method PageBlob.UploadPagesFromURL.
-type PageBlobUploadPagesFromURLResponse struct {
- PageBlobUploadPagesFromURLResult
+// pageBlobClientUploadPagesFromURLResponse contains the response from method pageBlobClient.UploadPagesFromURL.
+type pageBlobClientUploadPagesFromURLResponse struct {
+ pageBlobClientUploadPagesFromURLResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// PageBlobUploadPagesFromURLResult contains the result from method PageBlob.UploadPagesFromURL.
-type PageBlobUploadPagesFromURLResult struct {
+// pageBlobClientUploadPagesFromURLResult contains the result from method pageBlobClient.UploadPagesFromURL.
+type pageBlobClientUploadPagesFromURLResult struct {
// BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
BlobSequenceNumber *int64
@@ -2264,15 +2209,15 @@ type PageBlobUploadPagesFromURLResult struct {
XMSContentCRC64 []byte
}
-// PageBlobUploadPagesResponse contains the response from method PageBlob.UploadPages.
-type PageBlobUploadPagesResponse struct {
- PageBlobUploadPagesResult
+// pageBlobClientUploadPagesResponse contains the response from method pageBlobClient.UploadPages.
+type pageBlobClientUploadPagesResponse struct {
+ pageBlobClientUploadPagesResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// PageBlobUploadPagesResult contains the result from method PageBlob.UploadPages.
-type PageBlobUploadPagesResult struct {
+// pageBlobClientUploadPagesResult contains the result from method pageBlobClient.UploadPages.
+type pageBlobClientUploadPagesResult struct {
// BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
BlobSequenceNumber *int64
@@ -2310,15 +2255,15 @@ type PageBlobUploadPagesResult struct {
XMSContentCRC64 []byte
}
-// ServiceFilterBlobsResponse contains the response from method Service.FilterBlobs.
-type ServiceFilterBlobsResponse struct {
- ServiceFilterBlobsResult
+// serviceClientFilterBlobsResponse contains the response from method serviceClient.FilterBlobs.
+type serviceClientFilterBlobsResponse struct {
+ serviceClientFilterBlobsResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ServiceFilterBlobsResult contains the result from method Service.FilterBlobs.
-type ServiceFilterBlobsResult struct {
+// serviceClientFilterBlobsResult contains the result from method serviceClient.FilterBlobs.
+type serviceClientFilterBlobsResult struct {
FilterBlobSegment
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string `xml:"ClientRequestID"`
@@ -2333,15 +2278,15 @@ type ServiceFilterBlobsResult struct {
Version *string `xml:"Version"`
}
-// ServiceGetAccountInfoResponse contains the response from method Service.GetAccountInfo.
-type ServiceGetAccountInfoResponse struct {
- ServiceGetAccountInfoResult
+// serviceClientGetAccountInfoResponse contains the response from method serviceClient.GetAccountInfo.
+type serviceClientGetAccountInfoResponse struct {
+ serviceClientGetAccountInfoResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ServiceGetAccountInfoResult contains the result from method Service.GetAccountInfo.
-type ServiceGetAccountInfoResult struct {
+// serviceClientGetAccountInfoResult contains the result from method serviceClient.GetAccountInfo.
+type serviceClientGetAccountInfoResult struct {
// AccountKind contains the information returned from the x-ms-account-kind header response.
AccountKind *AccountKind
@@ -2364,15 +2309,15 @@ type ServiceGetAccountInfoResult struct {
Version *string
}
-// ServiceGetPropertiesResponse contains the response from method Service.GetProperties.
-type ServiceGetPropertiesResponse struct {
- ServiceGetPropertiesResult
+// serviceClientGetPropertiesResponse contains the response from method serviceClient.GetProperties.
+type serviceClientGetPropertiesResponse struct {
+ serviceClientGetPropertiesResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ServiceGetPropertiesResult contains the result from method Service.GetProperties.
-type ServiceGetPropertiesResult struct {
+// serviceClientGetPropertiesResult contains the result from method serviceClient.GetProperties.
+type serviceClientGetPropertiesResult struct {
StorageServiceProperties
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string `xml:"ClientRequestID"`
@@ -2384,15 +2329,15 @@ type ServiceGetPropertiesResult struct {
Version *string `xml:"Version"`
}
-// ServiceGetStatisticsResponse contains the response from method Service.GetStatistics.
-type ServiceGetStatisticsResponse struct {
- ServiceGetStatisticsResult
+// serviceClientGetStatisticsResponse contains the response from method serviceClient.GetStatistics.
+type serviceClientGetStatisticsResponse struct {
+ serviceClientGetStatisticsResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ServiceGetStatisticsResult contains the result from method Service.GetStatistics.
-type ServiceGetStatisticsResult struct {
+// serviceClientGetStatisticsResult contains the result from method serviceClient.GetStatistics.
+type serviceClientGetStatisticsResult struct {
StorageServiceStats
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string `xml:"ClientRequestID"`
@@ -2407,15 +2352,15 @@ type ServiceGetStatisticsResult struct {
Version *string `xml:"Version"`
}
-// ServiceGetUserDelegationKeyResponse contains the response from method Service.GetUserDelegationKey.
-type ServiceGetUserDelegationKeyResponse struct {
- ServiceGetUserDelegationKeyResult
+// serviceClientGetUserDelegationKeyResponse contains the response from method serviceClient.GetUserDelegationKey.
+type serviceClientGetUserDelegationKeyResponse struct {
+ serviceClientGetUserDelegationKeyResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ServiceGetUserDelegationKeyResult contains the result from method Service.GetUserDelegationKey.
-type ServiceGetUserDelegationKeyResult struct {
+// serviceClientGetUserDelegationKeyResult contains the result from method serviceClient.GetUserDelegationKey.
+type serviceClientGetUserDelegationKeyResult struct {
UserDelegationKey
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string `xml:"ClientRequestID"`
@@ -2430,15 +2375,15 @@ type ServiceGetUserDelegationKeyResult struct {
Version *string `xml:"Version"`
}
-// ServiceListContainersSegmentResponse contains the response from method Service.ListContainers.
-type ServiceListContainersSegmentResponse struct {
- ServiceListContainersSegmentResult
+// serviceClientListContainersSegmentResponse contains the response from method serviceClient.ListContainersSegment.
+type serviceClientListContainersSegmentResponse struct {
+ serviceClientListContainersSegmentResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ServiceListContainersSegmentResult contains the result from method Service.ListContainers.
-type ServiceListContainersSegmentResult struct {
+// serviceClientListContainersSegmentResult contains the result from method serviceClient.ListContainersSegment.
+type serviceClientListContainersSegmentResult struct {
ListContainersSegmentResponse
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string `xml:"ClientRequestID"`
@@ -2450,15 +2395,15 @@ type ServiceListContainersSegmentResult struct {
Version *string `xml:"Version"`
}
-// ServiceSetPropertiesResponse contains the response from method Service.SetProperties.
-type ServiceSetPropertiesResponse struct {
- ServiceSetPropertiesResult
+// serviceClientSetPropertiesResponse contains the response from method serviceClient.SetProperties.
+type serviceClientSetPropertiesResponse struct {
+ serviceClientSetPropertiesResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ServiceSetPropertiesResult contains the result from method Service.SetProperties.
-type ServiceSetPropertiesResult struct {
+// serviceClientSetPropertiesResult contains the result from method serviceClient.SetProperties.
+type serviceClientSetPropertiesResult struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
@@ -2469,15 +2414,15 @@ type ServiceSetPropertiesResult struct {
Version *string
}
-// ServiceSubmitBatchResponse contains the response from method Service.SubmitBatch.
-type ServiceSubmitBatchResponse struct {
- ServiceSubmitBatchResult
+// serviceClientSubmitBatchResponse contains the response from method serviceClient.SubmitBatch.
+type serviceClientSubmitBatchResponse struct {
+ serviceClientSubmitBatchResult
// RawResponse contains the underlying HTTP response.
RawResponse *http.Response
}
-// ServiceSubmitBatchResult contains the result from method Service.SubmitBatch.
-type ServiceSubmitBatchResult struct {
+// serviceClientSubmitBatchResult contains the result from method serviceClient.SubmitBatch.
+type serviceClientSubmitBatchResult struct {
// ContentType contains the information returned from the Content-Type header response.
ContentType *string
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go
index d4fa7768a10..7dcf6ef13e3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
@@ -11,42 +11,54 @@ package azblob
import (
"context"
"fmt"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"io"
"net/http"
"strconv"
"strings"
"time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
type serviceClient struct {
- con *connection
+ endpoint string
+ pl runtime.Pipeline
+}
+
+// newServiceClient creates a new instance of serviceClient with the specified values.
+// endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
+// pl - the pipeline used for sending requests and handling responses.
+func newServiceClient(endpoint string, pl runtime.Pipeline) *serviceClient {
+ client := &serviceClient{
+ endpoint: endpoint,
+ pl: pl,
+ }
+ return client
}
-// FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search expression. Filter blobs
-// searches across all containers within a storage account but can
+// FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search
+// expression. Filter blobs searches across all containers within a storage account but can
// be scoped within the expression to a single container.
-// If the operation fails it returns the *StorageError error type.
-func (client *serviceClient) FilterBlobs(ctx context.Context, options *ServiceFilterBlobsOptions) (ServiceFilterBlobsResponse, error) {
+// If the operation fails it returns an *azcore.ResponseError type.
+// options - serviceClientFilterBlobsOptions contains the optional parameters for the serviceClient.FilterBlobs method.
+func (client *serviceClient) FilterBlobs(ctx context.Context, options *serviceClientFilterBlobsOptions) (serviceClientFilterBlobsResponse, error) {
req, err := client.filterBlobsCreateRequest(ctx, options)
if err != nil {
- return ServiceFilterBlobsResponse{}, err
+ return serviceClientFilterBlobsResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ServiceFilterBlobsResponse{}, err
+ return serviceClientFilterBlobsResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ServiceFilterBlobsResponse{}, runtime.NewResponseError(resp)
+ return serviceClientFilterBlobsResponse{}, runtime.NewResponseError(resp)
}
return client.filterBlobsHandleResponse(resp)
}
// filterBlobsCreateRequest creates the FilterBlobs request.
-func (client *serviceClient) filterBlobsCreateRequest(ctx context.Context, options *ServiceFilterBlobsOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *serviceClient) filterBlobsCreateRequest(ctx context.Context, options *serviceClientFilterBlobsOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
@@ -65,7 +77,7 @@ func (client *serviceClient) filterBlobsCreateRequest(ctx context.Context, optio
reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
@@ -74,8 +86,8 @@ func (client *serviceClient) filterBlobsCreateRequest(ctx context.Context, optio
}
// filterBlobsHandleResponse handles the FilterBlobs response.
-func (client *serviceClient) filterBlobsHandleResponse(resp *http.Response) (ServiceFilterBlobsResponse, error) {
- result := ServiceFilterBlobsResponse{RawResponse: resp}
+func (client *serviceClient) filterBlobsHandleResponse(resp *http.Response) (serviceClientFilterBlobsResponse, error) {
+ result := serviceClientFilterBlobsResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -88,36 +100,37 @@ func (client *serviceClient) filterBlobsHandleResponse(resp *http.Response) (Ser
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ServiceFilterBlobsResponse{}, err
+ return serviceClientFilterBlobsResponse{}, err
}
result.Date = &date
}
if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil {
- return ServiceFilterBlobsResponse{}, err
+ return serviceClientFilterBlobsResponse{}, err
}
return result, nil
}
// GetAccountInfo - Returns the sku name and account kind
-// If the operation fails it returns the *StorageError error type.
-func (client *serviceClient) GetAccountInfo(ctx context.Context, options *ServiceGetAccountInfoOptions) (ServiceGetAccountInfoResponse, error) {
+// If the operation fails it returns an *azcore.ResponseError type.
+// options - serviceClientGetAccountInfoOptions contains the optional parameters for the serviceClient.GetAccountInfo method.
+func (client *serviceClient) GetAccountInfo(ctx context.Context, options *serviceClientGetAccountInfoOptions) (serviceClientGetAccountInfoResponse, error) {
req, err := client.getAccountInfoCreateRequest(ctx, options)
if err != nil {
- return ServiceGetAccountInfoResponse{}, err
+ return serviceClientGetAccountInfoResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ServiceGetAccountInfoResponse{}, err
+ return serviceClientGetAccountInfoResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ServiceGetAccountInfoResponse{}, runtime.NewResponseError(resp)
+ return serviceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp)
}
return client.getAccountInfoHandleResponse(resp)
}
// getAccountInfoCreateRequest creates the GetAccountInfo request.
-func (client *serviceClient) getAccountInfoCreateRequest(ctx context.Context, options *ServiceGetAccountInfoOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *serviceClient) getAccountInfoCreateRequest(ctx context.Context, options *serviceClientGetAccountInfoOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
@@ -125,14 +138,14 @@ func (client *serviceClient) getAccountInfoCreateRequest(ctx context.Context, op
reqQP.Set("restype", "account")
reqQP.Set("comp", "properties")
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
req.Raw().Header.Set("Accept", "application/xml")
return req, nil
}
// getAccountInfoHandleResponse handles the GetAccountInfo response.
-func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceGetAccountInfoResponse, error) {
- result := ServiceGetAccountInfoResponse{RawResponse: resp}
+func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) (serviceClientGetAccountInfoResponse, error) {
+ result := serviceClientGetAccountInfoResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -145,7 +158,7 @@ func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) (
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ServiceGetAccountInfoResponse{}, err
+ return serviceClientGetAccountInfoResponse{}, err
}
result.Date = &date
}
@@ -158,34 +171,35 @@ func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) (
if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" {
isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val)
if err != nil {
- return ServiceGetAccountInfoResponse{}, err
+ return serviceClientGetAccountInfoResponse{}, err
}
result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled
}
return result, nil
}
-// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and CORS (Cross-Origin Resource Sharing)
-// rules.
-// If the operation fails it returns the *StorageError error type.
-func (client *serviceClient) GetProperties(ctx context.Context, options *ServiceGetPropertiesOptions) (ServiceGetPropertiesResponse, error) {
+// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and
+// CORS (Cross-Origin Resource Sharing) rules.
+// If the operation fails it returns an *azcore.ResponseError type.
+// options - serviceClientGetPropertiesOptions contains the optional parameters for the serviceClient.GetProperties method.
+func (client *serviceClient) GetProperties(ctx context.Context, options *serviceClientGetPropertiesOptions) (serviceClientGetPropertiesResponse, error) {
req, err := client.getPropertiesCreateRequest(ctx, options)
if err != nil {
- return ServiceGetPropertiesResponse{}, err
+ return serviceClientGetPropertiesResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ServiceGetPropertiesResponse{}, err
+ return serviceClientGetPropertiesResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ServiceGetPropertiesResponse{}, runtime.NewResponseError(resp)
+ return serviceClientGetPropertiesResponse{}, runtime.NewResponseError(resp)
}
return client.getPropertiesHandleResponse(resp)
}
// getPropertiesCreateRequest creates the GetProperties request.
-func (client *serviceClient) getPropertiesCreateRequest(ctx context.Context, options *ServiceGetPropertiesOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *serviceClient) getPropertiesCreateRequest(ctx context.Context, options *serviceClientGetPropertiesOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
@@ -196,7 +210,7 @@ func (client *serviceClient) getPropertiesCreateRequest(ctx context.Context, opt
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
@@ -205,8 +219,8 @@ func (client *serviceClient) getPropertiesCreateRequest(ctx context.Context, opt
}
// getPropertiesHandleResponse handles the GetProperties response.
-func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (ServiceGetPropertiesResponse, error) {
- result := ServiceGetPropertiesResponse{RawResponse: resp}
+func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (serviceClientGetPropertiesResponse, error) {
+ result := serviceClientGetPropertiesResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -217,32 +231,33 @@ func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (S
result.Version = &val
}
if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil {
- return ServiceGetPropertiesResponse{}, err
+ return serviceClientGetPropertiesResponse{}, err
}
return result, nil
}
-// GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary location endpoint when read-access
-// geo-redundant replication is enabled for the storage account.
-// If the operation fails it returns the *StorageError error type.
-func (client *serviceClient) GetStatistics(ctx context.Context, options *ServiceGetStatisticsOptions) (ServiceGetStatisticsResponse, error) {
+// GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary
+// location endpoint when read-access geo-redundant replication is enabled for the storage account.
+// If the operation fails it returns an *azcore.ResponseError type.
+// options - serviceClientGetStatisticsOptions contains the optional parameters for the serviceClient.GetStatistics method.
+func (client *serviceClient) GetStatistics(ctx context.Context, options *serviceClientGetStatisticsOptions) (serviceClientGetStatisticsResponse, error) {
req, err := client.getStatisticsCreateRequest(ctx, options)
if err != nil {
- return ServiceGetStatisticsResponse{}, err
+ return serviceClientGetStatisticsResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ServiceGetStatisticsResponse{}, err
+ return serviceClientGetStatisticsResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ServiceGetStatisticsResponse{}, runtime.NewResponseError(resp)
+ return serviceClientGetStatisticsResponse{}, runtime.NewResponseError(resp)
}
return client.getStatisticsHandleResponse(resp)
}
// getStatisticsCreateRequest creates the GetStatistics request.
-func (client *serviceClient) getStatisticsCreateRequest(ctx context.Context, options *ServiceGetStatisticsOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *serviceClient) getStatisticsCreateRequest(ctx context.Context, options *serviceClientGetStatisticsOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
@@ -253,7 +268,7 @@ func (client *serviceClient) getStatisticsCreateRequest(ctx context.Context, opt
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
@@ -262,8 +277,8 @@ func (client *serviceClient) getStatisticsCreateRequest(ctx context.Context, opt
}
// getStatisticsHandleResponse handles the GetStatistics response.
-func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (ServiceGetStatisticsResponse, error) {
- result := ServiceGetStatisticsResponse{RawResponse: resp}
+func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (serviceClientGetStatisticsResponse, error) {
+ result := serviceClientGetStatisticsResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -276,36 +291,40 @@ func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (S
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ServiceGetStatisticsResponse{}, err
+ return serviceClientGetStatisticsResponse{}, err
}
result.Date = &date
}
if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil {
- return ServiceGetStatisticsResponse{}, err
+ return serviceClientGetStatisticsResponse{}, err
}
return result, nil
}
-// GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using bearer token authentication.
-// If the operation fails it returns the *StorageError error type.
-func (client *serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceGetUserDelegationKeyOptions) (ServiceGetUserDelegationKeyResponse, error) {
+// GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using
+// bearer token authentication.
+// If the operation fails it returns an *azcore.ResponseError type.
+// keyInfo - Key information
+// options - serviceClientGetUserDelegationKeyOptions contains the optional parameters for the serviceClient.GetUserDelegationKey
+// method.
+func (client *serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *serviceClientGetUserDelegationKeyOptions) (serviceClientGetUserDelegationKeyResponse, error) {
req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options)
if err != nil {
- return ServiceGetUserDelegationKeyResponse{}, err
+ return serviceClientGetUserDelegationKeyResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ServiceGetUserDelegationKeyResponse{}, err
+ return serviceClientGetUserDelegationKeyResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ServiceGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp)
+ return serviceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp)
}
return client.getUserDelegationKeyHandleResponse(resp)
}
// getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request.
-func (client *serviceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *ServiceGetUserDelegationKeyOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPost, client.con.Endpoint())
+func (client *serviceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *serviceClientGetUserDelegationKeyOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint)
if err != nil {
return nil, err
}
@@ -316,7 +335,7 @@ func (client *serviceClient) getUserDelegationKeyCreateRequest(ctx context.Conte
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
@@ -325,8 +344,8 @@ func (client *serviceClient) getUserDelegationKeyCreateRequest(ctx context.Conte
}
// getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response.
-func (client *serviceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (ServiceGetUserDelegationKeyResponse, error) {
- result := ServiceGetUserDelegationKeyResponse{RawResponse: resp}
+func (client *serviceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (serviceClientGetUserDelegationKeyResponse, error) {
+ result := serviceClientGetUserDelegationKeyResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -339,33 +358,35 @@ func (client *serviceClient) getUserDelegationKeyHandleResponse(resp *http.Respo
if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val)
if err != nil {
- return ServiceGetUserDelegationKeyResponse{}, err
+ return serviceClientGetUserDelegationKeyResponse{}, err
}
result.Date = &date
}
if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil {
- return ServiceGetUserDelegationKeyResponse{}, err
+ return serviceClientGetUserDelegationKeyResponse{}, err
}
return result, nil
}
// ListContainersSegment - The List Containers Segment operation returns a list of the containers under the specified account
-// If the operation fails it returns the *StorageError error type.
-func (client *serviceClient) ListContainersSegment(options *ServiceListContainersSegmentOptions) *ServiceListContainersSegmentPager {
- return &ServiceListContainersSegmentPager{
+// If the operation fails it returns an *azcore.ResponseError type.
+// options - serviceClientListContainersSegmentOptions contains the optional parameters for the serviceClient.ListContainersSegment
+// method.
+func (client *serviceClient) ListContainersSegment(options *serviceClientListContainersSegmentOptions) *serviceClientListContainersSegmentPager {
+ return &serviceClientListContainersSegmentPager{
client: client,
requester: func(ctx context.Context) (*policy.Request, error) {
return client.listContainersSegmentCreateRequest(ctx, options)
},
- advancer: func(ctx context.Context, resp ServiceListContainersSegmentResponse) (*policy.Request, error) {
+ advancer: func(ctx context.Context, resp serviceClientListContainersSegmentResponse) (*policy.Request, error) {
return runtime.NewRequest(ctx, http.MethodGet, *resp.ListContainersSegmentResponse.NextMarker)
},
}
}
// listContainersSegmentCreateRequest creates the ListContainersSegment request.
-func (client *serviceClient) listContainersSegmentCreateRequest(ctx context.Context, options *ServiceListContainersSegmentOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodGet, client.con.Endpoint())
+func (client *serviceClient) listContainersSegmentCreateRequest(ctx context.Context, options *serviceClientListContainersSegmentOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil {
return nil, err
}
@@ -387,7 +408,7 @@ func (client *serviceClient) listContainersSegmentCreateRequest(ctx context.Cont
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
@@ -396,8 +417,8 @@ func (client *serviceClient) listContainersSegmentCreateRequest(ctx context.Cont
}
// listContainersSegmentHandleResponse handles the ListContainersSegment response.
-func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Response) (ServiceListContainersSegmentResponse, error) {
- result := ServiceListContainersSegmentResponse{RawResponse: resp}
+func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Response) (serviceClientListContainersSegmentResponse, error) {
+ result := serviceClientListContainersSegmentResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -408,32 +429,34 @@ func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Resp
result.Version = &val
}
if err := runtime.UnmarshalAsXML(resp, &result.ListContainersSegmentResponse); err != nil {
- return ServiceListContainersSegmentResponse{}, err
+ return serviceClientListContainersSegmentResponse{}, err
}
return result, nil
}
-// SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics and CORS (Cross-Origin Resource
-// Sharing) rules
-// If the operation fails it returns the *StorageError error type.
-func (client *serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceSetPropertiesOptions) (ServiceSetPropertiesResponse, error) {
+// SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics
+// and CORS (Cross-Origin Resource Sharing) rules
+// If the operation fails it returns an *azcore.ResponseError type.
+// storageServiceProperties - The StorageService properties.
+// options - serviceClientSetPropertiesOptions contains the optional parameters for the serviceClient.SetProperties method.
+func (client *serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *serviceClientSetPropertiesOptions) (serviceClientSetPropertiesResponse, error) {
req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options)
if err != nil {
- return ServiceSetPropertiesResponse{}, err
+ return serviceClientSetPropertiesResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ServiceSetPropertiesResponse{}, err
+ return serviceClientSetPropertiesResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusAccepted) {
- return ServiceSetPropertiesResponse{}, runtime.NewResponseError(resp)
+ return serviceClientSetPropertiesResponse{}, runtime.NewResponseError(resp)
}
return client.setPropertiesHandleResponse(resp)
}
// setPropertiesCreateRequest creates the SetProperties request.
-func (client *serviceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceSetPropertiesOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPut, client.con.Endpoint())
+func (client *serviceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *serviceClientSetPropertiesOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil {
return nil, err
}
@@ -444,7 +467,7 @@ func (client *serviceClient) setPropertiesCreateRequest(ctx context.Context, sto
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
@@ -453,8 +476,8 @@ func (client *serviceClient) setPropertiesCreateRequest(ctx context.Context, sto
}
// setPropertiesHandleResponse handles the SetProperties response.
-func (client *serviceClient) setPropertiesHandleResponse(resp *http.Response) (ServiceSetPropertiesResponse, error) {
- result := ServiceSetPropertiesResponse{RawResponse: resp}
+func (client *serviceClient) setPropertiesHandleResponse(resp *http.Response) (serviceClientSetPropertiesResponse, error) {
+ result := serviceClientSetPropertiesResponse{RawResponse: resp}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
@@ -468,25 +491,30 @@ func (client *serviceClient) setPropertiesHandleResponse(resp *http.Response) (S
}
// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request.
-// If the operation fails it returns the *StorageError error type.
-func (client *serviceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceSubmitBatchOptions) (ServiceSubmitBatchResponse, error) {
+// If the operation fails it returns an *azcore.ResponseError type.
+// contentLength - The length of the request.
+// multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header
+// value: multipart/mixed; boundary=batch_
+// body - Initial data
+// options - serviceClientSubmitBatchOptions contains the optional parameters for the serviceClient.SubmitBatch method.
+func (client *serviceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *serviceClientSubmitBatchOptions) (serviceClientSubmitBatchResponse, error) {
req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options)
if err != nil {
- return ServiceSubmitBatchResponse{}, err
+ return serviceClientSubmitBatchResponse{}, err
}
- resp, err := client.con.Pipeline().Do(req)
+ resp, err := client.pl.Do(req)
if err != nil {
- return ServiceSubmitBatchResponse{}, err
+ return serviceClientSubmitBatchResponse{}, err
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
- return ServiceSubmitBatchResponse{}, runtime.NewResponseError(resp)
+ return serviceClientSubmitBatchResponse{}, runtime.NewResponseError(resp)
}
return client.submitBatchHandleResponse(resp)
}
// submitBatchCreateRequest creates the SubmitBatch request.
-func (client *serviceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceSubmitBatchOptions) (*policy.Request, error) {
- req, err := runtime.NewRequest(ctx, http.MethodPost, client.con.Endpoint())
+func (client *serviceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *serviceClientSubmitBatchOptions) (*policy.Request, error) {
+ req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint)
if err != nil {
return nil, err
}
@@ -499,7 +527,7 @@ func (client *serviceClient) submitBatchCreateRequest(ctx context.Context, conte
runtime.SkipBodyDownload(req)
req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10))
req.Raw().Header.Set("Content-Type", multipartContentType)
- req.Raw().Header.Set("x-ms-version", "2019-12-12")
+ req.Raw().Header.Set("x-ms-version", "2020-10-02")
if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID)
}
@@ -508,8 +536,8 @@ func (client *serviceClient) submitBatchCreateRequest(ctx context.Context, conte
}
// submitBatchHandleResponse handles the SubmitBatch response.
-func (client *serviceClient) submitBatchHandleResponse(resp *http.Response) (ServiceSubmitBatchResponse, error) {
- result := ServiceSubmitBatchResponse{RawResponse: resp}
+func (client *serviceClient) submitBatchHandleResponse(resp *http.Response) (serviceClientSubmitBatchResponse, error) {
+ result := serviceClientSubmitBatchResponse{RawResponse: resp}
if val := resp.Header.Get("Content-Type"); val != "" {
result.ContentType = &val
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go
index a95cc9b132f..42726159b6f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go
index 79dc2501e95..c51d8d78c12 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go
index 93b99d259ff..1cf97387de2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go
@@ -1,5 +1,5 @@
-//go:build go1.16
-// +build go1.16
+//go:build go1.18
+// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE
new file mode 100644
index 00000000000..3d8b93bc798
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) Microsoft Corporation.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go
new file mode 100644
index 00000000000..259ca6d56f4
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go
@@ -0,0 +1,39 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package cache allows third parties to implement external storage for caching token data
+for distributed systems or multiple local applications access.
+
+The data stored and extracted will represent the entire cache. Therefore it is recommended
+one msal instance per user. This data is considered opaque and there are no guarantees to
+implementers on the format being passed.
+*/
+package cache
+
+// Marshaler marshals data from an internal cache to bytes that can be stored.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Unmarshaler unmarshals data from a storage medium into the internal cache, overwriting it.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Serializer can serialize the cache to binary or from binary into the cache.
+type Serializer interface {
+ Marshaler
+ Unmarshaler
+}
+
+// ExportReplace is used export or replace what is in the cache.
+type ExportReplace interface {
+ // Replace replaces the cache with what is in external storage.
+ // key is the suggested key which can be used for partitioning the cache
+ Replace(cache Unmarshaler, key string)
+ // Export writes the binary representation of the cache (cache.Marshal()) to
+ // external storage. This is considered opaque.
+ // key is the suggested key which can be used for partitioning the cache
+ Export(cache Marshaler, key string)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
new file mode 100644
index 00000000000..11a33de73a1
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
@@ -0,0 +1,510 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package confidential provides a client for authentication of "confidential" applications.
+A "confidential" application is defined as an app that runs on servers. Such apps are considered
+difficult to access and for that reason capable of keeping an application secret.
+Confidential clients can hold configuration-time secrets.
+*/
+package confidential
+
+import (
+ "context"
+ "crypto"
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "net/url"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+/*
+Design note:
+
+confidential.Client uses base.Client as an embedded type. base.Client statically assigns its attributes
+during creation. As it doesn't have any pointers in it, anything borrowed from it, such as
+Base.AuthParams is a copy that is free to be manipulated here.
+
+Duplicate Calls shared between public.Client and this package:
+There are some duplicate call options provided here that are the same as in public.Client. This
+is a design choice. Go proverb(https://www.youtube.com/watch?v=PAAkCSZUG1c&t=9m28s):
+"a little copying is better than a little dependency". Yes, we could have another package with
+shared options (fail). That divides like 2 options from all others which makes the user look
+through more docs. We can have all clients in one package, but I think separate packages
+here makes for better naming (public.Client vs client.PublicClient). So I chose a little
+duplication.
+
+.Net People, Take note on X509:
+This uses x509.Certificates and private keys. x509 does not store private keys. .Net
+has some x509.Certificate2 thing that has private keys, but that is just some bullcrap that .Net
+added, it doesn't exist in real life. Seriously, "x509.Certificate2", bahahahaha. As such I've
+put a PEM decoder into here.
+*/
+
+// TODO(msal): This should have example code for each method on client using Go's example doc framework.
+// base usage details should be included in the package documentation.
+
+// AuthResult contains the results of one token acquisition operation.
+// For details see https://aka.ms/msal-net-authenticationresult
+type AuthResult = base.AuthResult
+
+type Account = shared.Account
+
+// CertFromPEM converts a PEM file (.pem or .key) for use with NewCredFromCert(). The file
+// must contain the public certificate and the private key. If a PEM block is encrypted and
+// password is not an empty string, it attempts to decrypt the PEM blocks using the password.
+// Multiple certs are due to certificate chaining for use cases like TLS that sign from root to leaf.
+func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.PrivateKey, error) {
+ var certs []*x509.Certificate
+ var priv crypto.PrivateKey
+ for {
+ block, rest := pem.Decode(pemData)
+ if block == nil {
+ break
+ }
+
+ //nolint:staticcheck // x509.IsEncryptedPEMBlock and x509.DecryptPEMBlock are deprecated. They are used here only to support a usecase.
+ if x509.IsEncryptedPEMBlock(block) {
+ b, err := x509.DecryptPEMBlock(block, []byte(password))
+ if err != nil {
+ return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %v", err)
+ }
+ block, _ = pem.Decode(b)
+ if block == nil {
+ return nil, nil, fmt.Errorf("encounter encrypted PEM block that did not decode")
+ }
+ }
+
+ switch block.Type {
+ case "CERTIFICATE":
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be parsed by x509: %v", err)
+ }
+ certs = append(certs, cert)
+ case "PRIVATE KEY":
+ if priv != nil {
+ return nil, nil, errors.New("found multiple private key blocks")
+ }
+
+ var err error
+ priv, err = x509.ParsePKCS8PrivateKey(block.Bytes)
+ if err != nil {
+ return nil, nil, fmt.Errorf("could not decode private key: %v", err)
+ }
+ case "RSA PRIVATE KEY":
+ if priv != nil {
+ return nil, nil, errors.New("found multiple private key blocks")
+ }
+ var err error
+ priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+ if err != nil {
+ return nil, nil, fmt.Errorf("could not decode private key: %v", err)
+ }
+ }
+ pemData = rest
+ }
+
+ if len(certs) == 0 {
+ return nil, nil, fmt.Errorf("no certificates found")
+ }
+
+ if priv == nil {
+ return nil, nil, fmt.Errorf("no private key found")
+ }
+
+ return certs, priv, nil
+}
+
+// AssertionRequestOptions has required information for client assertion claims
+type AssertionRequestOptions = exported.AssertionRequestOptions
+
+// Credential represents the credential used in confidential client flows.
+type Credential struct {
+ secret string
+
+ cert *x509.Certificate
+ key crypto.PrivateKey
+ x5c []string
+
+ assertionCallback func(context.Context, AssertionRequestOptions) (string, error)
+
+ tokenProvider func(context.Context, TokenProviderParameters) (TokenProviderResult, error)
+}
+
+// toInternal returns the accesstokens.Credential that is used internally. The current structure of the
+// code requires that client.go, requests.go and confidential.go share a credential type without
+// having import recursion. That requires the type used between is in a shared package. Therefore
+// we have this.
+func (c Credential) toInternal() (*accesstokens.Credential, error) {
+ if c.secret != "" {
+ return &accesstokens.Credential{Secret: c.secret}, nil
+ }
+ if c.cert != nil {
+ if c.key == nil {
+ return nil, errors.New("missing private key for certificate")
+ }
+ return &accesstokens.Credential{Cert: c.cert, Key: c.key, X5c: c.x5c}, nil
+ }
+ if c.key != nil {
+ return nil, errors.New("missing certificate for private key")
+ }
+ if c.assertionCallback != nil {
+ return &accesstokens.Credential{AssertionCallback: c.assertionCallback}, nil
+ }
+ if c.tokenProvider != nil {
+ return &accesstokens.Credential{TokenProvider: c.tokenProvider}, nil
+ }
+ return nil, errors.New("invalid credential")
+}
+
+// NewCredFromSecret creates a Credential from a secret.
+func NewCredFromSecret(secret string) (Credential, error) {
+ if secret == "" {
+ return Credential{}, errors.New("secret can't be empty string")
+ }
+ return Credential{secret: secret}, nil
+}
+
+// NewCredFromAssertion creates a Credential from a signed assertion.
+//
+// Deprecated: a Credential created by this function can't refresh the
+// assertion when it expires. Use NewCredFromAssertionCallback instead.
+func NewCredFromAssertion(assertion string) (Credential, error) {
+ if assertion == "" {
+ return Credential{}, errors.New("assertion can't be empty string")
+ }
+ return NewCredFromAssertionCallback(func(context.Context, AssertionRequestOptions) (string, error) { return assertion, nil }), nil
+}
+
+// NewCredFromAssertionCallback creates a Credential that invokes a callback to get assertions
+// authenticating the application. The callback must be thread safe.
+func NewCredFromAssertionCallback(callback func(context.Context, AssertionRequestOptions) (string, error)) Credential {
+ return Credential{assertionCallback: callback}
+}
+
+// NewCredFromCert creates a Credential from an x509.Certificate and an RSA private key.
+// CertFromPEM() can be used to get these values from a PEM file.
+func NewCredFromCert(cert *x509.Certificate, key crypto.PrivateKey) Credential {
+ cred, _ := NewCredFromCertChain([]*x509.Certificate{cert}, key)
+ return cred
+}
+
+// NewCredFromCertChain creates a Credential from a chain of x509.Certificates and an RSA private key
+// as returned by CertFromPEM().
+func NewCredFromCertChain(certs []*x509.Certificate, key crypto.PrivateKey) (Credential, error) {
+ cred := Credential{key: key}
+ k, ok := key.(*rsa.PrivateKey)
+ if !ok {
+ return cred, errors.New("key must be an RSA key")
+ }
+ for _, cert := range certs {
+ if cert == nil {
+ // not returning an error here because certs may still contain a sufficient cert/key pair
+ continue
+ }
+ certKey, ok := cert.PublicKey.(*rsa.PublicKey)
+ if ok && k.E == certKey.E && k.N.Cmp(certKey.N) == 0 {
+ // We know this is the signing cert because its public key matches the given private key.
+ // This cert must be first in x5c.
+ cred.cert = cert
+ cred.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cred.x5c...)
+ } else {
+ cred.x5c = append(cred.x5c, base64.StdEncoding.EncodeToString(cert.Raw))
+ }
+ }
+ if cred.cert == nil {
+ return cred, errors.New("key doesn't match any certificate")
+ }
+ return cred, nil
+}
+
+// TokenProviderParameters is the authentication parameters passed to token providers
+type TokenProviderParameters = exported.TokenProviderParameters
+
+// TokenProviderResult is the authentication result returned by custom token providers
+type TokenProviderResult = exported.TokenProviderResult
+
+// NewCredFromTokenProvider creates a Credential from a function that provides access tokens. The function
+// must be concurrency safe. This is intended only to allow the Azure SDK to cache MSI tokens. It isn't
+// useful to applications in general because the token provider must implement all authentication logic.
+func NewCredFromTokenProvider(provider func(context.Context, TokenProviderParameters) (TokenProviderResult, error)) Credential {
+ return Credential{tokenProvider: provider}
+}
+
+// AutoDetectRegion instructs MSAL Go to auto detect region for Azure regional token service.
+func AutoDetectRegion() string {
+ return "TryAutoDetect"
+}
+
+// Client is a representation of authentication client for confidential applications as defined in the
+// package doc. A new Client should be created PER SERVICE USER.
+// For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications
+type Client struct {
+ base base.Client
+
+ cred *accesstokens.Credential
+
+ // userID is some unique identifier for a user. It actually isn't used by us at all, it
+ // simply acts as another hint that a confidential.Client is for a single user.
+ userID string
+}
+
+// Options are optional settings for New(). These options are set using various functions
+// returning Option calls.
+type Options struct {
+ // Accessor controls cache persistence.
+ // By default there is no cache persistence. This can be set using the WithAccessor() option.
+ Accessor cache.ExportReplace
+
+ // The host of the Azure Active Directory authority.
+ // The default is https://login.microsoftonline.com/common. This can be changed using the
+ // WithAuthority() option.
+ Authority string
+
+ // The HTTP client used for making requests.
+ // It defaults to a shared http.Client.
+ HTTPClient ops.HTTPClient
+
+ // SendX5C specifies if x5c claim(public key of the certificate) should be sent to STS.
+ SendX5C bool
+
+ // Instructs MSAL Go to use an Azure regional token service with sepcified AzureRegion.
+ AzureRegion string
+}
+
+func (o Options) validate() error {
+ u, err := url.Parse(o.Authority)
+ if err != nil {
+ return fmt.Errorf("the Authority(%s) does not parse as a valid URL", o.Authority)
+ }
+ if u.Scheme != "https" {
+ return fmt.Errorf("the Authority(%s) does not appear to use https", o.Authority)
+ }
+ return nil
+}
+
+// Option is an optional argument to New().
+type Option func(o *Options)
+
+// WithAuthority allows you to provide a custom authority for use in the client.
+func WithAuthority(authority string) Option {
+ return func(o *Options) {
+ o.Authority = authority
+ }
+}
+
+// WithAccessor provides a cache accessor that will read and write to some externally managed cache
+// that may or may not be shared with other applications.
+func WithAccessor(accessor cache.ExportReplace) Option {
+ return func(o *Options) {
+ o.Accessor = accessor
+ }
+}
+
+// WithHTTPClient allows for a custom HTTP client to be set.
+func WithHTTPClient(httpClient ops.HTTPClient) Option {
+ return func(o *Options) {
+ o.HTTPClient = httpClient
+ }
+}
+
+// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication.
+func WithX5C() Option {
+ return func(o *Options) {
+ o.SendX5C = true
+ }
+}
+
+// WithAzureRegion sets the region(preferred) or Confidential.AutoDetectRegion() for auto detecting region.
+// Region names as per https://azure.microsoft.com/en-ca/global-infrastructure/geographies/.
+// See https://aka.ms/region-map for more details on region names.
+// The region value should be short region name for the region where the service is deployed.
+// For example "centralus" is short name for region Central US.
+// Not all auth flows can use the regional token service.
+// Service To Service (client credential flow) tokens can be obtained from the regional service.
+// Requires configuration at the tenant level.
+// Auto-detection works on a limited number of Azure artifacts (VMs, Azure functions).
+// If auto-detection fails, the non-regional endpoint will be used.
+// If an invalid region name is provided, the non-regional endpoint MIGHT be used or the token request MIGHT fail.
+func WithAzureRegion(val string) Option {
+ return func(o *Options) {
+ o.AzureRegion = val
+ }
+}
+
+// New is the constructor for Client. userID is the unique identifier of the user this client
+// will store credentials for (a Client is per user). clientID is the Azure clientID and cred is
+// the type of credential to use.
+func New(clientID string, cred Credential, options ...Option) (Client, error) {
+ internalCred, err := cred.toInternal()
+ if err != nil {
+ return Client{}, err
+ }
+
+ opts := Options{
+ Authority: base.AuthorityPublicCloud,
+ HTTPClient: shared.DefaultClient,
+ }
+
+ for _, o := range options {
+ o(&opts)
+ }
+ if err := opts.validate(); err != nil {
+ return Client{}, err
+ }
+
+ baseOpts := []base.Option{
+ base.WithCacheAccessor(opts.Accessor),
+ base.WithRegionDetection(opts.AzureRegion),
+ base.WithX5C(opts.SendX5C),
+ }
+ if cred.tokenProvider != nil {
+ // The caller will handle all details of authentication, using Client only as a token cache.
+ // Declaring the authority host known prevents unnecessary metadata discovery requests. (The
+ // authority is irrelevant to Client and friends because the token provider is responsible
+ // for authentication.)
+ parsed, err := url.Parse(opts.Authority)
+ if err != nil {
+ return Client{}, errors.New("invalid authority")
+ }
+ baseOpts = append(baseOpts, base.WithKnownAuthorityHosts([]string{parsed.Hostname()}))
+ }
+ base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), baseOpts...)
+ if err != nil {
+ return Client{}, err
+ }
+
+ return Client{base: base, cred: internalCred}, nil
+}
+
+// UserID is the unique user identifier this client is for.
+func (cca Client) UserID() string {
+ return cca.userID
+}
+
+// AuthCodeURL creates a URL used to acquire an authorization code. Users need to call CreateAuthorizationCodeURLParameters and pass it in.
+func (cca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string) (string, error) {
+ return cca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, cca.base.AuthParams)
+}
+
+// AcquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call.
+// These are set by using various AcquireTokenSilentOption functions.
+type AcquireTokenSilentOptions struct {
+ // Account represents the account to use. To set, use the WithSilentAccount() option.
+ Account Account
+}
+
+// AcquireTokenSilentOption changes options inside AcquireTokenSilentOptions used in .AcquireTokenSilent().
+type AcquireTokenSilentOption func(a *AcquireTokenSilentOptions)
+
+// WithSilentAccount uses the passed account during an AcquireTokenSilent() call.
+func WithSilentAccount(account Account) AcquireTokenSilentOption {
+ return func(a *AcquireTokenSilentOptions) {
+ a.Account = account
+ }
+}
+
+// AcquireTokenSilent acquires a token from either the cache or using a refresh token.
+func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, options ...AcquireTokenSilentOption) (AuthResult, error) {
+ opts := AcquireTokenSilentOptions{}
+ for _, o := range options {
+ o(&opts)
+ }
+ var isAppCache bool
+ if opts.Account.IsZero() {
+ isAppCache = true
+ }
+
+ silentParameters := base.AcquireTokenSilentParameters{
+ Scopes: scopes,
+ Account: opts.Account,
+ RequestType: accesstokens.ATConfidential,
+ Credential: cca.cred,
+ IsAppCache: isAppCache,
+ }
+
+ return cca.base.AcquireTokenSilent(ctx, silentParameters)
+}
+
+// AcquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
+type AcquireTokenByAuthCodeOptions struct {
+ Challenge string
+}
+
+// AcquireTokenByAuthCodeOption changes options inside AcquireTokenByAuthCodeOptions used in .AcquireTokenByAuthCode().
+type AcquireTokenByAuthCodeOption func(a *AcquireTokenByAuthCodeOptions)
+
+// WithChallenge allows you to provide a challenge for the .AcquireTokenByAuthCode() call.
+func WithChallenge(challenge string) AcquireTokenByAuthCodeOption {
+ return func(a *AcquireTokenByAuthCodeOptions) {
+ a.Challenge = challenge
+ }
+}
+
+// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code.
+// The specified redirect URI must be the same URI that was used when the authorization code was requested.
+func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...AcquireTokenByAuthCodeOption) (AuthResult, error) {
+ opts := AcquireTokenByAuthCodeOptions{}
+ for _, o := range options {
+ o(&opts)
+ }
+
+ params := base.AcquireTokenAuthCodeParameters{
+ Scopes: scopes,
+ Code: code,
+ Challenge: opts.Challenge,
+ AppType: accesstokens.ATConfidential,
+ Credential: cca.cred, // This setting differs from public.Client.AcquireTokenByAuthCode
+ RedirectURI: redirectURI,
+ }
+
+ return cca.base.AcquireTokenByAuthCode(ctx, params)
+}
+
+// AcquireTokenByCredential acquires a security token from the authority, using the client credentials grant.
+func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string) (AuthResult, error) {
+ authParams := cca.base.AuthParams
+ authParams.Scopes = scopes
+ authParams.AuthorizationType = authority.ATClientCredentials
+
+ token, err := cca.base.Token.Credential(ctx, authParams, cca.cred)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return cca.base.AuthResultFromToken(ctx, authParams, token, true)
+}
+
+// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token.
+// Refer https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow.
+func (cca Client) AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string) (AuthResult, error) {
+ params := base.AcquireTokenOnBehalfOfParameters{
+ Scopes: scopes,
+ UserAssertion: userAssertion,
+ Credential: cca.cred,
+ }
+ return cca.base.AcquireTokenOnBehalfOf(ctx, params)
+}
+
+// Account gets the account in the token cache with the specified homeAccountID.
+func (cca Client) Account(homeAccountID string) Account {
+ return cca.base.Account(homeAccountID)
+}
+
+// RemoveAccount signs the account out and forgets account from token cache.
+func (cca Client) RemoveAccount(account Account) error {
+ cca.base.RemoveAccount(account)
+ return nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md
new file mode 100644
index 00000000000..34a699f4801
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md
@@ -0,0 +1,111 @@
+# MSAL Error Design
+
+Author: Abhidnya Patil(abhidnya.patil@microsoft.com)
+
+Contributors:
+
+- John Doak(jdoak@microsoft.com)
+- Keegan Caruso(Keegan.Caruso@microsoft.com)
+- Joel Hendrix(jhendrix@microsoft.com)
+
+## Background
+
+Errors in MSAL are intended for app developers to troubleshoot and not for displaying to end-users.
+
+### Go error handling vs other MSAL languages
+
+Most modern languages use exception based errors. Simply put, you "throw" an exception and it must be caught at some routine in the upper stack or it will eventually crash the program.
+
+Go doesn't use exceptions, instead it relies on multiple return values, one of which can be the builtin error interface type. It is up to the user to decide what to do.
+
+### Go custom error types
+
+Errors can be created in Go by simply using errors.New() or fmt.Errorf() to create an "error".
+
+Custom errors can be created in multiple ways. One of the more robust ways is simply to satisfy the error interface:
+
+```go
+type MyCustomErr struct {
+ Msg string
+}
+func (m MyCustomErr) Error() string { // This implements "error"
+ return m.Msg
+}
+```
+
+### MSAL Error Goals
+
+- Provide diagnostics to the user and for tickets that can be used to track down bugs or client misconfigurations
+- Detect errors that are transitory and can be retried
+- Allow the user to identify certain errors that the program can respond to, such as informing the user of the need to do an enrollment
+
+## Implementing Client Side Errors
+
+Client side errors indicate a misconfiguration or passing of bad arguments that is non-recoverable. Retrying isn't possible.
+
+These errors can simply be standard Go errors created by errors.New() or fmt.Errorf(). If down the line we need a custom error, we can introduce it, but for now the error messages just need to be clear on what the issue was.
+
+## Implementing Service Side Errors
+
+Service side errors occur when an external RPC responds either with an HTTP error code or returns a message that includes an error.
+
+These errors can be transitory (please slow down) or permanent (HTTP 404). To provide our diagnostic goals, we require the ability to differentiate these errors from other errors.
+
+The current implementation includes a specialized type that captures any error from the server:
+
+```go
+// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the
+// http.Request and Response objects. Implements error.
+type CallErr struct {
+ Req *http.Request
+ Resp *http.Response
+ Err error
+}
+
+// Error implements the error interface.
+func (e CallErr) Error() string {
+ return e.Err.Error()
+}
+
+// Verbose returns a verbose error message with the request or response.
+func (e CallErr) Verbose() string {
+ e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need
+ e.Resp.TLS = nil // Same
+ return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp))
+}
+```
+
+A user will always receive the most concise error we provide. They can tell if it is a server side error using the Go errors package:
+
+```go
+var callErr CallErr
+if errors.As(err, &callErr) {
+ ...
+}
+```
+
+We provide a Verbose() function that can retrieve the most verbose message from any error we provide:
+
+```go
+fmt.Println(errors.Verbose(err))
+```
+
+If further differentiation is required, we can add custom errors that use Go error wrapping on top of CallErr to achieve our diagnostic goals (such as detecting when to retry a call due to transient errors).
+
+CallErr is always thrown from the comm package (which handles all http requests) and looks similar to:
+
+```go
+return nil, errors.CallErr{
+ Req: req,
+ Resp: reply,
+ Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, ErrorResponse), //ErrorResponse is the json body extracted from the http response
+ }
+```
+
+## Future Decisions
+
+The ability to retry calls needs to have centralized responsibility. Either the user is doing it or the client is doing it.
+
+If the user should be responsible, our errors package will include a CanRetry() function that will inform the user if the error provided to them is retryable. This is based on the http error code and possibly the type of error that was returned. It would also include a sleep time if the server returned an amount of time to wait.
+
+Otherwise we will do this internally and retries will be left to us.
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
new file mode 100644
index 00000000000..c9b8dbed088
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
@@ -0,0 +1,89 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package errors
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "reflect"
+ "strings"
+
+ "github.com/kylelemons/godebug/pretty"
+)
+
+var prettyConf = &pretty.Config{
+ IncludeUnexported: false,
+ SkipZeroFields: true,
+ TrackCycles: true,
+ Formatter: map[reflect.Type]interface{}{
+ reflect.TypeOf((*io.Reader)(nil)).Elem(): func(r io.Reader) string {
+ b, err := io.ReadAll(r)
+ if err != nil {
+ return "could not read io.Reader content"
+ }
+ return string(b)
+ },
+ },
+}
+
+type verboser interface {
+ Verbose() string
+}
+
+// Verbose returns the most verbose message available from err and its chain of wrapped errors.
+func Verbose(err error) string {
+ build := strings.Builder{}
+ for {
+ if err == nil {
+ break
+ }
+ if v, ok := err.(verboser); ok {
+ build.WriteString(v.Verbose())
+ } else {
+ build.WriteString(err.Error())
+ }
+ err = errors.Unwrap(err)
+ }
+ return build.String()
+}
+
+// New is equivalent to errors.New().
+func New(text string) error {
+ return errors.New(text)
+}
+
+// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the
+// http.Request and Response objects. Implements error.
+type CallErr struct {
+ Req *http.Request
+ // Resp contains response body
+ Resp *http.Response
+ Err error
+}
+
+// Error implements the error interface.
+func (e CallErr) Error() string {
+ return e.Err.Error()
+}
+
+// Verbose returns a verbose error message with the request or response.
+func (e CallErr) Verbose() string {
+ e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need
+ e.Resp.TLS = nil // Same
+ return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp))
+}
+
+// Is reports whether any error in errors chain matches target.
+func Is(err, target error) bool {
+ return errors.Is(err, target)
+}
+
+// As finds the first error in errors chain that matches target,
+// and if so, sets target to that error value and returns true.
+// Otherwise, it returns false.
+func As(err error, target interface{}) bool {
+ return errors.As(err, target)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
new file mode 100644
index 00000000000..a86f06400ba
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
@@ -0,0 +1,406 @@
+// Package base contains a "Base" client that is used by the external public.Client and confidential.Client.
+// Base holds shared attributes that must be available to both clients and methods that act as
+// shared calls.
+package base
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+const (
+	// AuthorityPublicCloud is the default AAD authority host
+	AuthorityPublicCloud = "https://login.microsoftonline.com/common"
+	// scopeSeparator joins OAuth scopes into the space-delimited wire format.
+	scopeSeparator = " "
+)
+
+// manager provides an internal cache. It is defined to allow faking the cache in tests.
+// In all production use it is a *storage.Manager.
+type manager interface {
+	Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (storage.TokenResponse, error)
+	Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error)
+	AllAccounts() []shared.Account
+	Account(homeAccountID string) shared.Account
+	RemoveAccount(account shared.Account, clientID string)
+}
+
+// partitionedManager provides an internal cache. It is defined to allow faking the cache in tests.
+// In all production use it is a *storage.PartitionedManager.
+type partitionedManager interface {
+	Read(ctx context.Context, authParameters authority.AuthParams) (storage.TokenResponse, error)
+	Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error)
+}
+
+// noopCacheAccessor is the default cache.ExportReplace used when the caller
+// supplies no external cache; both hooks are deliberate no-ops.
+type noopCacheAccessor struct{}
+
+func (n noopCacheAccessor) Replace(cache cache.Unmarshaler, key string) {}
+func (n noopCacheAccessor) Export(cache cache.Marshaler, key string)    {}
+
+// AcquireTokenSilentParameters contains the parameters to acquire a token silently (from cache).
+type AcquireTokenSilentParameters struct {
+	Scopes            []string
+	Account           shared.Account
+	RequestType       accesstokens.AppType
+	Credential        *accesstokens.Credential
+	// IsAppCache selects the application (client-credential) cache partition.
+	IsAppCache        bool
+	UserAssertion     string
+	AuthorizationType authority.AuthorizeType
+}
+
+// AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow.
+// To use PKCE, set the CodeChallengeParameter.
+// Code challenges are used to secure authorization code grants; for more information, visit
+// https://tools.ietf.org/html/rfc7636.
+type AcquireTokenAuthCodeParameters struct {
+	Scopes      []string
+	Code        string
+	Challenge   string
+	RedirectURI string
+	AppType     accesstokens.AppType
+	Credential  *accesstokens.Credential
+}
+
+// AcquireTokenOnBehalfOfParameters contains the parameters required to acquire a token
+// on behalf of a user via the OAuth2 on-behalf-of grant.
+type AcquireTokenOnBehalfOfParameters struct {
+	Scopes        []string
+	Credential    *accesstokens.Credential
+	UserAssertion string
+}
+
+// AuthResult contains the results of one token acquisition operation in PublicClientApplication
+// or ConfidentialClientApplication. For details see https://aka.ms/msal-net-authenticationresult
+type AuthResult struct {
+	Account        shared.Account
+	IDToken        accesstokens.IDToken
+	AccessToken    string
+	ExpiresOn      time.Time
+	GrantedScopes  []string
+	DeclinedScopes []string
+}
+
+// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache).
+// It fails if the cached access token does not validate (expired, cached in the future, etc.).
+func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) {
+	if err := storageTokenResponse.AccessToken.Validate(); err != nil {
+		return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err)
+	}
+
+	account := storageTokenResponse.Account
+	accessToken := storageTokenResponse.AccessToken.Secret
+	grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator)
+
+	// Checking if there was an ID token in the cache; this will throw an error in the case of confidential client applications.
+	var idToken accesstokens.IDToken
+	if !storageTokenResponse.IDToken.IsZero() {
+		err := idToken.UnmarshalJSON([]byte(storageTokenResponse.IDToken.Secret))
+		if err != nil {
+			return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err)
+		}
+	}
+	return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil
+}
+
+// NewAuthResult creates an AuthResult. It errors if the STS declined any of the requested scopes.
+func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Account) (AuthResult, error) {
+	if len(tokenResponse.DeclinedScopes) > 0 {
+		return AuthResult{}, fmt.Errorf("token response failed because declined scopes are present: %s", strings.Join(tokenResponse.DeclinedScopes, ","))
+	}
+	return AuthResult{
+		Account:       account,
+		IDToken:       tokenResponse.IDToken,
+		AccessToken:   tokenResponse.AccessToken,
+		ExpiresOn:     tokenResponse.ExpiresOn.T,
+		GrantedScopes: tokenResponse.GrantedScopes.Slice,
+	}, nil
+}
+
+// Client is a base client that provides access to common methods and primatives that
+// can be used by multiple clients.
+type Client struct {
+	Token    *oauth.Client
+	manager  manager            // *storage.Manager or fakeManager in tests
+	pmanager partitionedManager // *storage.PartitionedManager or fakeManager in tests
+
+	AuthParams    authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New().
+	cacheAccessor cache.ExportReplace
+}
+
+// Option is an optional argument to the New constructor.
+type Option func(c *Client)
+
+// WithCacheAccessor allows you to set some type of cache for storing authentication tokens.
+// A nil accessor is ignored, keeping the no-op default.
+func WithCacheAccessor(ca cache.ExportReplace) Option {
+	return func(c *Client) {
+		if ca != nil {
+			c.cacheAccessor = ca
+		}
+	}
+}
+
+// WithKnownAuthorityHosts specifies hosts Client shouldn't validate or request metadata for because they're known to the user
+func WithKnownAuthorityHosts(hosts []string) Option {
+	return func(c *Client) {
+		// Copy so later caller mutation of hosts cannot affect the client.
+		cp := make([]string, len(hosts))
+		copy(cp, hosts)
+		c.AuthParams.KnownAuthorityHosts = cp
+	}
+}
+
+// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication.
+func WithX5C(sendX5C bool) Option {
+	return func(c *Client) {
+		c.AuthParams.SendX5C = sendX5C
+	}
+}
+
+// WithRegionDetection sets the Azure region used for regional STS endpoints.
+func WithRegionDetection(region string) Option {
+	return func(c *Client) {
+		c.AuthParams.AuthorityInfo.Region = region
+	}
+}
+
+// New is the constructor for Base. It validates the authority URI, builds the
+// default auth parameters and wires up the in-memory token caches before
+// applying any options.
+func New(clientID string, authorityURI string, token *oauth.Client, options ...Option) (Client, error) {
+	authInfo, err := authority.NewInfoFromAuthorityURI(authorityURI, true)
+	if err != nil {
+		return Client{}, err
+	}
+	authParams := authority.NewAuthParams(clientID, authInfo)
+	client := Client{ // Note: Hey, don't even THINK about making Base into *Base. See "design notes" in public.go and confidential.go
+		Token:         token,
+		AuthParams:    authParams,
+		cacheAccessor: noopCacheAccessor{},
+		manager:       storage.New(token),
+		pmanager:      storage.NewPartitionedManager(token),
+	}
+	for _, o := range options {
+		o(&client)
+	}
+	return client, nil
+
+}
+
+// AuthCodeURL creates a URL used to acquire an authorization code.
+// It resolves the authority's authorization endpoint and appends the standard
+// OAuth2 query parameters (plus optional state, PKCE and prompt values).
+func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, authParams authority.AuthParams) (string, error) {
+	endpoints, err := b.Token.ResolveEndpoints(ctx, authParams.AuthorityInfo, "")
+	if err != nil {
+		return "", err
+	}
+
+	baseURL, err := url.Parse(endpoints.AuthorizationEndpoint)
+	if err != nil {
+		return "", err
+	}
+
+	v := url.Values{}
+	v.Add("client_id", clientID)
+	v.Add("response_type", "code")
+	v.Add("redirect_uri", redirectURI)
+	v.Add("scope", strings.Join(scopes, scopeSeparator))
+	if authParams.State != "" {
+		v.Add("state", authParams.State)
+	}
+	if authParams.CodeChallenge != "" {
+		v.Add("code_challenge", authParams.CodeChallenge)
+	}
+	if authParams.CodeChallengeMethod != "" {
+		v.Add("code_challenge_method", authParams.CodeChallengeMethod)
+	}
+	if authParams.Prompt != "" {
+		v.Add("prompt", authParams.Prompt)
+	}
+	// There were left over from an implementation that didn't use any of these. We may
+	// need to add them later, but as of now aren't needed.
+	/*
+		if p.ResponseMode != "" {
+			urlParams.Add("response_mode", p.ResponseMode)
+		}
+		if p.LoginHint != "" {
+			urlParams.Add("login_hint", p.LoginHint)
+		}
+		if p.DomainHint != "" {
+			urlParams.Add("domain_hint", p.DomainHint)
+		}
+	*/
+	baseURL.RawQuery = v.Encode()
+	return baseURL.String(), nil
+}
+
+// AcquireTokenSilent attempts to satisfy a token request from the cache,
+// falling back to redeeming a cached refresh token when the cached access
+// token is unusable. OBO requests use the partitioned cache; everything else
+// uses the account-keyed cache.
+func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilentParameters) (AuthResult, error) {
+	authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and authParams is not a pointer.
+	authParams.Scopes = silent.Scopes
+	authParams.HomeAccountID = silent.Account.HomeAccountID
+	authParams.AuthorizationType = silent.AuthorizationType
+	authParams.UserAssertion = silent.UserAssertion
+
+	var storageTokenResponse storage.TokenResponse
+	var err error
+	if authParams.AuthorizationType == authority.ATOnBehalfOf {
+		// If an external cache is attached, import it before reading and export after.
+		if s, ok := b.pmanager.(cache.Serializer); ok {
+			suggestedCacheKey := authParams.CacheKey(silent.IsAppCache)
+			b.cacheAccessor.Replace(s, suggestedCacheKey)
+			defer b.cacheAccessor.Export(s, suggestedCacheKey)
+		}
+		storageTokenResponse, err = b.pmanager.Read(ctx, authParams)
+		if err != nil {
+			return AuthResult{}, err
+		}
+	} else {
+		if s, ok := b.manager.(cache.Serializer); ok {
+			suggestedCacheKey := authParams.CacheKey(silent.IsAppCache)
+			b.cacheAccessor.Replace(s, suggestedCacheKey)
+			defer b.cacheAccessor.Export(s, suggestedCacheKey)
+		}
+		authParams.AuthorizationType = authority.ATRefreshToken
+		storageTokenResponse, err = b.manager.Read(ctx, authParams, silent.Account)
+		if err != nil {
+			return AuthResult{}, err
+		}
+	}
+
+	result, err := AuthResultFromStorage(storageTokenResponse)
+	if err != nil {
+		// Cached access token unusable: try the cached refresh token instead.
+		if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() {
+			return AuthResult{}, errors.New("no token found")
+		}
+
+		var cc *accesstokens.Credential
+		if silent.RequestType == accesstokens.ATConfidential {
+			cc = silent.Credential
+		}
+
+		token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken)
+		if err != nil {
+			return AuthResult{}, err
+		}
+
+		return b.AuthResultFromToken(ctx, authParams, token, true)
+	}
+	return result, nil
+}
+
+// AcquireTokenByAuthCode redeems an authorization code (optionally with a PKCE
+// challenge) for tokens and caches the result.
+func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) {
+	authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer.
+	authParams.Scopes = authCodeParams.Scopes
+	authParams.Redirecturi = authCodeParams.RedirectURI
+	authParams.AuthorizationType = authority.ATAuthCode
+
+	var cc *accesstokens.Credential
+	if authCodeParams.AppType == accesstokens.ATConfidential {
+		cc = authCodeParams.Credential
+		authParams.IsConfidentialClient = true
+	}
+
+	req, err := accesstokens.NewCodeChallengeRequest(authParams, authCodeParams.AppType, cc, authCodeParams.Code, authCodeParams.Challenge)
+	if err != nil {
+		return AuthResult{}, err
+	}
+
+	token, err := b.Token.AuthCode(ctx, req)
+	if err != nil {
+		return AuthResult{}, err
+	}
+
+	return b.AuthResultFromToken(ctx, authParams, token, true)
+}
+
+// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token.
+// It first attempts a silent (cache) lookup; on a cache miss it redeems the user assertion
+// with the STS via the on-behalf-of grant and caches the result.
+func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams AcquireTokenOnBehalfOfParameters) (AuthResult, error) {
+	authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer.
+	authParams.Scopes = onBehalfOfParams.Scopes
+	authParams.AuthorizationType = authority.ATOnBehalfOf
+	authParams.UserAssertion = onBehalfOfParams.UserAssertion
+
+	silentParameters := AcquireTokenSilentParameters{
+		Scopes:            onBehalfOfParams.Scopes,
+		RequestType:       accesstokens.ATConfidential,
+		Credential:        onBehalfOfParams.Credential,
+		UserAssertion:     onBehalfOfParams.UserAssertion,
+		AuthorizationType: authority.ATOnBehalfOf,
+	}
+	token, err := b.AcquireTokenSilent(ctx, silentParameters)
+	if err != nil {
+		// Cache miss (or unusable cached token): go to the wire.
+		token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential)
+		if err != nil {
+			return AuthResult{}, err
+		}
+		return b.AuthResultFromToken(ctx, authParams, token, true)
+	}
+	return token, err
+}
+
+// AuthResultFromToken converts a wire token response into an AuthResult and,
+// when cacheWrite is true, persists it to the appropriate cache (partitioned
+// for OBO, account-keyed otherwise), round-tripping any external cache.
+func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) {
+	if !cacheWrite {
+		return NewAuthResult(token, shared.Account{})
+	}
+
+	var account shared.Account
+	var err error
+	if authParams.AuthorizationType == authority.ATOnBehalfOf {
+		if s, ok := b.pmanager.(cache.Serializer); ok {
+			suggestedCacheKey := token.CacheKey(authParams)
+			b.cacheAccessor.Replace(s, suggestedCacheKey)
+			defer b.cacheAccessor.Export(s, suggestedCacheKey)
+		}
+		account, err = b.pmanager.Write(authParams, token)
+		if err != nil {
+			return AuthResult{}, err
+		}
+	} else {
+		if s, ok := b.manager.(cache.Serializer); ok {
+			suggestedCacheKey := token.CacheKey(authParams)
+			b.cacheAccessor.Replace(s, suggestedCacheKey)
+			defer b.cacheAccessor.Export(s, suggestedCacheKey)
+		}
+		account, err = b.manager.Write(authParams, token)
+		if err != nil {
+			return AuthResult{}, err
+		}
+	}
+	return NewAuthResult(token, account)
+}
+
+// AllAccounts returns every account currently present in the cache,
+// importing/exporting any external cache around the read.
+func (b Client) AllAccounts() []shared.Account {
+	if s, ok := b.manager.(cache.Serializer); ok {
+		suggestedCacheKey := b.AuthParams.CacheKey(false)
+		b.cacheAccessor.Replace(s, suggestedCacheKey)
+		defer b.cacheAccessor.Export(s, suggestedCacheKey)
+	}
+
+	accounts := b.manager.AllAccounts()
+	return accounts
+}
+
+// Account returns the cached account for the given home account ID, or the
+// zero Account if none is cached.
+func (b Client) Account(homeAccountID string) shared.Account {
+	authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer.
+	authParams.AuthorizationType = authority.AccountByID
+	authParams.HomeAccountID = homeAccountID
+	// NOTE(review): the cache key is derived from b.AuthParams, not the local
+	// authParams configured above — confirm this is intended.
+	if s, ok := b.manager.(cache.Serializer); ok {
+		suggestedCacheKey := b.AuthParams.CacheKey(false)
+		b.cacheAccessor.Replace(s, suggestedCacheKey)
+		defer b.cacheAccessor.Export(s, suggestedCacheKey)
+	}
+	account := b.manager.Account(homeAccountID)
+	return account
+}
+
+// RemoveAccount removes all the ATs, RTs and IDTs from the cache associated with this account.
+func (b Client) RemoveAccount(account shared.Account) {
+	if s, ok := b.manager.(cache.Serializer); ok {
+		suggestedCacheKey := b.AuthParams.CacheKey(false)
+		b.cacheAccessor.Replace(s, suggestedCacheKey)
+		defer b.cacheAccessor.Export(s, suggestedCacheKey)
+	}
+	b.manager.RemoveAccount(account, b.AuthParams.ClientID)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
new file mode 100644
index 00000000000..548c2faebf9
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
@@ -0,0 +1,200 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+// Contract is the JSON structure that is written to any storage medium when serializing
+// the internal cache. This design is shared between MSAL versions in many languages.
+// This cannot be changed without design that includes other SDKs.
+type Contract struct {
+	AccessTokens  map[string]AccessToken               `json:"AccessToken,omitempty"`
+	RefreshTokens map[string]accesstokens.RefreshToken `json:"RefreshToken,omitempty"`
+	IDTokens      map[string]IDToken                   `json:"IdToken,omitempty"`
+	Accounts      map[string]shared.Account            `json:"Account,omitempty"`
+	AppMetaData   map[string]AppMetaData               `json:"AppMetadata,omitempty"`
+
+	AdditionalFields map[string]interface{}
+}
+
+// InMemoryContract is the in-memory counterpart of Contract, partitioned by a
+// caller-chosen key (e.g. user assertion hash) for OBO scenarios.
+type InMemoryContract struct {
+	AccessTokensPartition  map[string]map[string]AccessToken
+	RefreshTokensPartition map[string]map[string]accesstokens.RefreshToken
+	IDTokensPartition      map[string]map[string]IDToken
+	AccountsPartition      map[string]map[string]shared.Account
+	AppMetaData            map[string]AppMetaData
+}
+
+// NewInMemoryContract is the constructor for InMemoryContract.
+func NewInMemoryContract() *InMemoryContract {
+	return &InMemoryContract{
+		AccessTokensPartition:  map[string]map[string]AccessToken{},
+		RefreshTokensPartition: map[string]map[string]accesstokens.RefreshToken{},
+		IDTokensPartition:      map[string]map[string]IDToken{},
+		AccountsPartition:      map[string]map[string]shared.Account{},
+		AppMetaData:            map[string]AppMetaData{},
+	}
+}
+
+// NewContract is the constructor for Contract.
+func NewContract() *Contract {
+	return &Contract{
+		AccessTokens:     map[string]AccessToken{},
+		RefreshTokens:    map[string]accesstokens.RefreshToken{},
+		IDTokens:         map[string]IDToken{},
+		Accounts:         map[string]shared.Account{},
+		AppMetaData:      map[string]AppMetaData{},
+		AdditionalFields: map[string]interface{}{},
+	}
+}
+
+// AccessToken is the JSON representation of a MSAL access token for encoding to storage.
+type AccessToken struct {
+	HomeAccountID     string            `json:"home_account_id,omitempty"`
+	Environment       string            `json:"environment,omitempty"`
+	Realm             string            `json:"realm,omitempty"`
+	CredentialType    string            `json:"credential_type,omitempty"`
+	ClientID          string            `json:"client_id,omitempty"`
+	Secret            string            `json:"secret,omitempty"`
+	Scopes            string            `json:"target,omitempty"`
+	ExpiresOn         internalTime.Unix `json:"expires_on,omitempty"`
+	ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"`
+	CachedAt          internalTime.Unix `json:"cached_at,omitempty"`
+	UserAssertionHash string            `json:"user_assertion_hash,omitempty"`
+
+	AdditionalFields map[string]interface{}
+}
+
+// NewAccessToken is the constructor for AccessToken. All time values are
+// normalized to UTC before storage.
+func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token string) AccessToken {
+	return AccessToken{
+		HomeAccountID:     homeID,
+		Environment:       env,
+		Realm:             realm,
+		CredentialType:    "AccessToken",
+		ClientID:          clientID,
+		Secret:            token,
+		Scopes:            scopes,
+		CachedAt:          internalTime.Unix{T: cachedAt.UTC()},
+		ExpiresOn:         internalTime.Unix{T: expiresOn.UTC()},
+		ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()},
+	}
+}
+
+// Key outputs the key that can be used to uniquely look up this entry in a map.
+func (a AccessToken) Key() string {
+	return strings.Join(
+		[]string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes},
+		shared.CacheKeySeparator,
+	)
+}
+
+// FakeValidate enables tests to fake access token validation
+var FakeValidate func(AccessToken) error
+
+// Validate validates that this AccessToken can be used. A token is rejected
+// if it was cached in the future, expires within the next 5 minutes (treated
+// as already expired to allow for clock skew), or has no CachedAt timestamp.
+func (a AccessToken) Validate() error {
+	if FakeValidate != nil {
+		return FakeValidate(a)
+	}
+	if a.CachedAt.T.After(time.Now()) {
+		return errors.New("access token isn't valid, it was cached at a future time")
+	}
+	if a.ExpiresOn.T.Before(time.Now().Add(5 * time.Minute)) {
+		return fmt.Errorf("access token is expired")
+	}
+	if a.CachedAt.T.IsZero() {
+		return fmt.Errorf("access token does not have CachedAt set")
+	}
+	return nil
+}
+
+// IDToken is the JSON representation of an MSAL id token for encoding to storage.
+type IDToken struct {
+	HomeAccountID     string `json:"home_account_id,omitempty"`
+	Environment       string `json:"environment,omitempty"`
+	Realm             string `json:"realm,omitempty"`
+	CredentialType    string `json:"credential_type,omitempty"`
+	ClientID          string `json:"client_id,omitempty"`
+	Secret            string `json:"secret,omitempty"`
+	UserAssertionHash string `json:"user_assertion_hash,omitempty"`
+	AdditionalFields  map[string]interface{}
+}
+
+// IsZero determines if IDToken is the zero value. Empty (but non-nil) maps
+// and slices are treated as zero.
+func (i IDToken) IsZero() bool {
+	v := reflect.ValueOf(i)
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		if !field.IsZero() {
+			switch field.Kind() {
+			case reflect.Map, reflect.Slice:
+				if field.Len() == 0 {
+					continue
+				}
+			}
+			return false
+		}
+	}
+	return true
+}
+
+// NewIDToken is the constructor for IDToken.
+func NewIDToken(homeID, env, realm, clientID, idToken string) IDToken {
+	return IDToken{
+		HomeAccountID:  homeID,
+		Environment:    env,
+		Realm:          realm,
+		CredentialType: "IDToken",
+		ClientID:       clientID,
+		Secret:         idToken,
+	}
+}
+
+// Key outputs the key that can be used to uniquely look up this entry in a map.
+func (id IDToken) Key() string {
+	return strings.Join(
+		[]string{id.HomeAccountID, id.Environment, id.CredentialType, id.ClientID, id.Realm},
+		shared.CacheKeySeparator,
+	)
+}
+
+// AppMetaData is the JSON representation of application metadata for encoding to storage.
+type AppMetaData struct {
+	FamilyID    string `json:"family_id,omitempty"`
+	ClientID    string `json:"client_id,omitempty"`
+	Environment string `json:"environment,omitempty"`
+
+	AdditionalFields map[string]interface{}
+}
+
+// NewAppMetaData is the constructor for AppMetaData.
+func NewAppMetaData(familyID, clientID, environment string) AppMetaData {
+	return AppMetaData{
+		FamilyID:    familyID,
+		ClientID:    clientID,
+		Environment: environment,
+	}
+}
+
+// Key outputs the key that can be used to uniquely look up this entry in a map.
+func (a AppMetaData) Key() string {
+	return strings.Join(
+		[]string{"AppMetaData", a.Environment, a.ClientID},
+		shared.CacheKeySeparator,
+	)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
new file mode 100644
index 00000000000..d17e7c034a4
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
@@ -0,0 +1,430 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package storage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+// PartitionedManager is a partitioned in-memory cache of access tokens, accounts and meta data.
+// contractMu guards contract; aadCacheMu guards aadCache.
+type PartitionedManager struct {
+	contract   *InMemoryContract
+	contractMu sync.RWMutex
+	requests   aadInstanceDiscoveryer // *oauth.Token
+
+	aadCacheMu sync.RWMutex
+	aadCache   map[string]authority.InstanceDiscoveryMetadata
+}
+
+// NewPartitionedManager is the constructor for PartitionedManager.
+func NewPartitionedManager(requests *oauth.Client) *PartitionedManager {
+	m := &PartitionedManager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)}
+	m.contract = NewInMemoryContract()
+	return m
+}
+
+// Read reads a storage token from the cache if it exists. The cache is
+// partitioned by the hash of the request's user assertion; the access token,
+// refresh token, ID token and account are looked up in turn and all must be
+// present for the call to succeed.
+func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) {
+	realm := authParameters.AuthorityInfo.Tenant
+	clientID := authParameters.ClientID
+	scopes := authParameters.Scopes
+
+	metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo)
+	if err != nil {
+		return TokenResponse{}, err
+	}
+	userAssertionHash := authParameters.AssertionHash()
+	partitionKeyFromRequest := userAssertionHash
+
+	accessToken, err := m.readAccessToken(metadata.Aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest)
+	if err != nil {
+		return TokenResponse{}, err
+	}
+
+	AppMetaData, err := m.readAppMetaData(metadata.Aliases, clientID)
+	if err != nil {
+		return TokenResponse{}, err
+	}
+	familyID := AppMetaData.FamilyID
+
+	refreshToken, err := m.readRefreshToken(metadata.Aliases, familyID, clientID, userAssertionHash, partitionKeyFromRequest)
+	if err != nil {
+		return TokenResponse{}, err
+	}
+
+	idToken, err := m.readIDToken(metadata.Aliases, realm, clientID, userAssertionHash, getPartitionKeyIDTokenRead(accessToken))
+	if err != nil {
+		return TokenResponse{}, err
+	}
+
+	account, err := m.readAccount(metadata.Aliases, realm, userAssertionHash, idToken.HomeAccountID)
+	if err != nil {
+		return TokenResponse{}, err
+	}
+	return TokenResponse{
+		AccessToken:  accessToken,
+		RefreshToken: refreshToken,
+		IDToken:      idToken,
+		Account:      account,
+	}, nil
+}
+
+// Write writes a token response to the cache and returns the account information the token is stored with.
+// Each artifact (refresh token, access token, ID token, account, app metadata) is written to its own
+// partition; for OBO requests the user assertion hash is stamped onto every artifact.
+func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) {
+	authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID()
+	homeAccountID := authParameters.HomeAccountID
+	environment := authParameters.AuthorityInfo.Host
+	realm := authParameters.AuthorityInfo.Tenant
+	clientID := authParameters.ClientID
+	target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator)
+	userAssertionHash := authParameters.AssertionHash()
+	cachedAt := time.Now()
+
+	var account shared.Account
+
+	if len(tokenResponse.RefreshToken) > 0 {
+		refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID)
+		if authParameters.AuthorizationType == authority.ATOnBehalfOf {
+			refreshToken.UserAssertionHash = userAssertionHash
+		}
+		if err := m.writeRefreshToken(refreshToken, getPartitionKeyRefreshToken(refreshToken)); err != nil {
+			return account, err
+		}
+	}
+
+	if len(tokenResponse.AccessToken) > 0 {
+		accessToken := NewAccessToken(
+			homeAccountID,
+			environment,
+			realm,
+			clientID,
+			cachedAt,
+			tokenResponse.ExpiresOn.T,
+			tokenResponse.ExtExpiresOn.T,
+			target,
+			tokenResponse.AccessToken,
+		)
+		if authParameters.AuthorizationType == authority.ATOnBehalfOf {
+			accessToken.UserAssertionHash = userAssertionHash // get Hash method on this
+		}
+
+		// Since we have a valid access token, cache it before moving on.
+		if err := accessToken.Validate(); err == nil {
+			if err := m.writeAccessToken(accessToken, getPartitionKeyAccessToken(accessToken)); err != nil {
+				return account, err
+			}
+		} else {
+			return shared.Account{}, err
+		}
+	}
+
+	idTokenJwt := tokenResponse.IDToken
+	if !idTokenJwt.IsZero() {
+		idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken)
+		if authParameters.AuthorizationType == authority.ATOnBehalfOf {
+			idToken.UserAssertionHash = userAssertionHash
+		}
+		if err := m.writeIDToken(idToken, getPartitionKeyIDToken(idToken)); err != nil {
+			return shared.Account{}, err
+		}
+
+		localAccountID := idTokenJwt.LocalAccountID()
+		authorityType := authParameters.AuthorityInfo.AuthorityType
+
+		account = shared.NewAccount(
+			homeAccountID,
+			environment,
+			realm,
+			localAccountID,
+			authorityType,
+			idTokenJwt.PreferredUsername,
+		)
+		if authParameters.AuthorizationType == authority.ATOnBehalfOf {
+			account.UserAssertionHash = userAssertionHash
+		}
+		if err := m.writeAccount(account, getPartitionKeyAccount(account)); err != nil {
+			return shared.Account{}, err
+		}
+	}
+
+	AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment)
+
+	if err := m.writeAppMetaData(AppMetaData); err != nil {
+		return shared.Account{}, err
+	}
+	return account, nil
+}
+
+// getMetadataEntry returns instance discovery metadata for the authority,
+// serving from the local cache when possible and fetching otherwise.
+func (m *PartitionedManager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
+	md, err := m.aadMetadataFromCache(ctx, authorityInfo)
+	if err != nil {
+		// not in the cache, retrieve it
+		md, err = m.aadMetadata(ctx, authorityInfo)
+	}
+	return md, err
+}
+
+// aadMetadataFromCache returns the cached discovery metadata for the
+// authority's host, or an error on a cache miss.
+func (m *PartitionedManager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
+	m.aadCacheMu.RLock()
+	defer m.aadCacheMu.RUnlock()
+	metadata, ok := m.aadCache[authorityInfo.Host]
+	if ok {
+		return metadata, nil
+	}
+	return metadata, errors.New("not found")
+}
+
+// aadMetadata performs AAD instance discovery and caches every alias it
+// learns about, falling back to a self-referential entry for unknown hosts.
+func (m *PartitionedManager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
+	discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo)
+	if err != nil {
+		return authority.InstanceDiscoveryMetadata{}, err
+	}
+
+	m.aadCacheMu.Lock()
+	defer m.aadCacheMu.Unlock()
+
+	for _, metadataEntry := range discoveryResponse.Metadata {
+		for _, aliasedAuthority := range metadataEntry.Aliases {
+			m.aadCache[aliasedAuthority] = metadataEntry
+		}
+	}
+	if _, ok := m.aadCache[authorityInfo.Host]; !ok {
+		m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{
+			PreferredNetwork: authorityInfo.Host,
+			PreferredCache:   authorityInfo.Host,
+		}
+	}
+	return m.aadCache[authorityInfo.Host], nil
+}
+
+// readAccessToken scans the access-token partition for an entry matching
+// realm, client ID, user assertion hash, environment alias and scopes.
+func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey string) (AccessToken, error) {
+	m.contractMu.RLock()
+	defer m.contractMu.RUnlock()
+	if accessTokens, ok := m.contract.AccessTokensPartition[partitionKey]; ok {
+		// TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens.
+		// this shows up as the dominating node in a profile. for real-world scenarios this likely isn't
+		// an issue, however if it does become a problem then we know where to look.
+		for _, at := range accessTokens {
+			if at.Realm == realm && at.ClientID == clientID && at.UserAssertionHash == userAssertionHash {
+				if checkAlias(at.Environment, envAliases) {
+					if isMatchingScopes(scopes, at.Scopes) {
+						return at, nil
+					}
+				}
+			}
+		}
+	}
+	return AccessToken{}, fmt.Errorf("access token not found")
+}
+
+// writeAccessToken stores an access token under its key within the given
+// partition, creating the partition map on first use.
+func (m *PartitionedManager) writeAccessToken(accessToken AccessToken, partitionKey string) error {
+	m.contractMu.Lock()
+	defer m.contractMu.Unlock()
+	key := accessToken.Key()
+	if m.contract.AccessTokensPartition[partitionKey] == nil {
+		m.contract.AccessTokensPartition[partitionKey] = make(map[string]AccessToken)
+	}
+	m.contract.AccessTokensPartition[partitionKey][key] = accessToken
+	return nil
+}
+
+// matchFamilyRefreshTokenObo reports whether rt is a family refresh token
+// usable for the given OBO assertion and environment aliases.
+func matchFamilyRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string) bool {
+	return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.FamilyID != ""
+}
+
+// matchClientIDRefreshTokenObo reports whether rt belongs to clientID and is
+// usable for the given OBO assertion and environment aliases.
+func matchClientIDRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string, clientID string) bool {
+	return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID
+}
+
+// readRefreshToken finds a refresh token in the given partition. Matcher
+// order depends on whether the app is known to be in a family: family-less
+// apps match by client ID first, family apps by family ID first.
+func (m *PartitionedManager) readRefreshToken(envAliases []string, familyID, clientID, userAssertionHash, partitionKey string) (accesstokens.RefreshToken, error) {
+	byFamily := func(rt accesstokens.RefreshToken) bool {
+		return matchFamilyRefreshTokenObo(rt, userAssertionHash, envAliases)
+	}
+	byClient := func(rt accesstokens.RefreshToken) bool {
+		return matchClientIDRefreshTokenObo(rt, userAssertionHash, envAliases, clientID)
+	}
+
+	var matchers []func(rt accesstokens.RefreshToken) bool
+	if familyID == "" {
+		matchers = []func(rt accesstokens.RefreshToken) bool{
+			byClient, byFamily,
+		}
+	} else {
+		matchers = []func(rt accesstokens.RefreshToken) bool{
+			byFamily, byClient,
+		}
+	}
+
+	// TODO(keegan): All the tests here pass, but Bogdan says this is
+	// more complicated. I'm opening an issue for this to have him
+	// review the tests and suggest tests that would break this so
+	// we can re-write against good tests. His comments as follow:
+	// The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is.
+	// The algorithm is:
+	// If application is NOT part of the family, search by client_ID
+	// If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response).
+	// https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
+	m.contractMu.RLock()
+	defer m.contractMu.RUnlock()
+	for _, matcher := range matchers {
+		for _, rt := range m.contract.RefreshTokensPartition[partitionKey] {
+			if matcher(rt) {
+				return rt, nil
+			}
+		}
+	}
+
+	return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
+}
+
+func (m *PartitionedManager) writeRefreshToken(refreshToken accesstokens.RefreshToken, partitionKey string) error {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ key := refreshToken.Key()
+ if m.contract.AccessTokensPartition[partitionKey] == nil {
+ m.contract.RefreshTokensPartition[partitionKey] = make(map[string]accesstokens.RefreshToken)
+ }
+ m.contract.RefreshTokensPartition[partitionKey][key] = refreshToken
+ return nil
+}
+
// readIDToken returns the cached ID token in the given partition matching
// realm, clientID and userAssertionHash whose environment is one of envAliases.
func (m *PartitionedManager) readIDToken(envAliases []string, realm, clientID, userAssertionHash, partitionKey string) (IDToken, error) {
	m.contractMu.RLock()
	defer m.contractMu.RUnlock()
	for _, idt := range m.contract.IDTokensPartition[partitionKey] {
		if idt.Realm == realm && idt.ClientID == clientID && idt.UserAssertionHash == userAssertionHash {
			if checkAlias(idt.Environment, envAliases) {
				return idt, nil
			}
		}
	}
	return IDToken{}, fmt.Errorf("token not found")
}
+
// writeIDToken stores idToken in the given partition, creating the partition's
// map on first use.
func (m *PartitionedManager) writeIDToken(idToken IDToken, partitionKey string) error {
	key := idToken.Key()
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	if m.contract.IDTokensPartition[partitionKey] == nil {
		m.contract.IDTokensPartition[partitionKey] = make(map[string]IDToken)
	}
	m.contract.IDTokensPartition[partitionKey][key] = idToken
	return nil
}
+
// readAccount returns the cached account in the given partition matching realm
// and UserAssertionHash whose environment is one of envAliases.
func (m *PartitionedManager) readAccount(envAliases []string, realm, UserAssertionHash, partitionKey string) (shared.Account, error) {
	m.contractMu.RLock()
	defer m.contractMu.RUnlock()

	// You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
	// We only use a map because the storage contract shared between all language implementations says use a map.
	// We can't change that. The other is because the keys are made using a specific "env", but here we are allowing
	// a match in multiple envs (envAlias). That means we either need to hash each possible key and do the lookup
	// or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored
	// is really low (say 2). Each hash is more expensive than the entire iteration.
	for _, acc := range m.contract.AccountsPartition[partitionKey] {
		if checkAlias(acc.Environment, envAliases) && acc.UserAssertionHash == UserAssertionHash && acc.Realm == realm {
			return acc, nil
		}
	}
	return shared.Account{}, fmt.Errorf("account not found")
}
+
// writeAccount stores account in the given partition, creating the partition's
// map on first use.
func (m *PartitionedManager) writeAccount(account shared.Account, partitionKey string) error {
	key := account.Key()
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	if m.contract.AccountsPartition[partitionKey] == nil {
		m.contract.AccountsPartition[partitionKey] = make(map[string]shared.Account)
	}
	m.contract.AccountsPartition[partitionKey][key] = account
	return nil
}
+
// readAppMetaData returns the app metadata for clientID whose environment is
// one of envAliases. App metadata is not partitioned.
func (m *PartitionedManager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) {
	m.contractMu.RLock()
	defer m.contractMu.RUnlock()

	for _, app := range m.contract.AppMetaData {
		if checkAlias(app.Environment, envAliases) && app.ClientID == clientID {
			return app, nil
		}
	}
	return AppMetaData{}, fmt.Errorf("not found")
}
+
// writeAppMetaData stores AppMetaData keyed by its Key(). App metadata is not
// partitioned.
func (m *PartitionedManager) writeAppMetaData(AppMetaData AppMetaData) error {
	key := AppMetaData.Key()
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	m.contract.AppMetaData[key] = AppMetaData
	return nil
}
+
// update replaces the internal cache object wholesale. This is for use in
// tests, other uses are not supported.
func (m *PartitionedManager) update(cache *InMemoryContract) {
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	m.contract = cache
}
+
+// Marshal implements cache.Marshaler.
+func (m *PartitionedManager) Marshal() ([]byte, error) {
+ return json.Marshal(m.contract)
+}
+
// Unmarshal implements cache.Unmarshaler.
// It replaces the entire in-memory contract with the decoded contents of b;
// on decode error the existing contract is left untouched.
func (m *PartitionedManager) Unmarshal(b []byte) error {
	m.contractMu.Lock()
	defer m.contractMu.Unlock()

	contract := NewInMemoryContract()

	err := json.Unmarshal(b, contract)
	if err != nil {
		return err
	}

	m.contract = contract

	return nil
}
+
+func getPartitionKeyAccessToken(item AccessToken) string {
+ if item.UserAssertionHash != "" {
+ return item.UserAssertionHash
+ }
+ return item.HomeAccountID
+}
+
+func getPartitionKeyRefreshToken(item accesstokens.RefreshToken) string {
+ if item.UserAssertionHash != "" {
+ return item.UserAssertionHash
+ }
+ return item.HomeAccountID
+}
+
// getPartitionKeyIDToken selects the cache partition key for an ID token:
// always the home account ID.
func getPartitionKeyIDToken(item IDToken) string {
	return item.HomeAccountID
}
+
// getPartitionKeyAccount selects the cache partition key for an account:
// always the home account ID.
func getPartitionKeyAccount(item shared.Account) string {
	return item.HomeAccountID
}
+
// getPartitionKeyIDTokenRead selects the partition key used when reading an
// ID token. NOTE(review): it takes an AccessToken, presumably so the partition
// can be derived from an already-located access token — confirm against callers.
func getPartitionKeyIDTokenRead(item AccessToken) string {
	return item.HomeAccountID
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
new file mode 100644
index 00000000000..b759408b5b4
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
@@ -0,0 +1,517 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package storage holds all cached token information for MSAL. This storage can be
+// augmented with third-party extensions to provide persistent storage. In that case,
+// reads and writes in upper packages will call Marshal() to take the entire in-memory
+// representation and write it to storage and Unmarshal() to update the entire in-memory
+// storage with what was in the persistent storage. The persistent storage can only be
+// accessed in this way because multiple MSAL clients written in multiple languages can
+// access the same storage and must adhere to the same method that was defined
+// previously.
+package storage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
// aadInstanceDiscoveryer performs AAD instance discovery. It allows faking in
// tests; it is implemented in production by ops/authority.Client.
type aadInstanceDiscoveryer interface {
	AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error)
}
+
// TokenResponse mimics a token response that was pulled from the cache.
// Any of the fields may be their zero value when the corresponding credential
// was not found.
type TokenResponse struct {
	RefreshToken accesstokens.RefreshToken
	IDToken      IDToken // *Credential
	AccessToken  AccessToken
	Account      shared.Account
}
+
// Manager is an in-memory cache of access tokens, accounts and meta data. This data is
// updated on read/write calls. Unmarshal() replaces all data stored here with whatever
// was given to it on each call.
type Manager struct {
	contract   *Contract    // token/account storage; guarded by contractMu
	contractMu sync.RWMutex
	requests   aadInstanceDiscoveryer // *oauth.Token

	// aadCache memoizes instance discovery results by authority host;
	// guarded by aadCacheMu (separate from contractMu).
	aadCacheMu sync.RWMutex
	aadCache   map[string]authority.InstanceDiscoveryMetadata
}
+
+// New is the constructor for Manager.
+func New(requests *oauth.Client) *Manager {
+ m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)}
+ m.contract = NewContract()
+ return m
+}
+
// checkAlias reports whether alias is one of the entries in aliases.
func checkAlias(alias string, aliases []string) bool {
	for _, candidate := range aliases {
		if candidate == alias {
			return true
		}
	}
	return false
}
+
+func isMatchingScopes(scopesOne []string, scopesTwo string) bool {
+ newScopesTwo := strings.Split(scopesTwo, scopeSeparator)
+ scopeCounter := 0
+ for _, scope := range scopesOne {
+ for _, otherScope := range newScopesTwo {
+ if strings.EqualFold(scope, otherScope) {
+ scopeCounter++
+ continue
+ }
+ }
+ }
+ return scopeCounter == len(scopesOne)
+}
+
// Read reads a storage token from the cache if it exists.
// When account is the zero value only the access-token lookup is performed and
// a miss yields a zero AccessToken rather than an error. Otherwise the ID
// token, app metadata (for the family ID), refresh token and account must all
// be found or an error is returned. Authority aliases come from instance
// discovery unless the caller supplied KnownAuthorityHosts.
func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams, account shared.Account) (TokenResponse, error) {
	homeAccountID := authParameters.HomeAccountID
	realm := authParameters.AuthorityInfo.Tenant
	clientID := authParameters.ClientID
	scopes := authParameters.Scopes

	// fetch metadata if and only if the authority isn't explicitly trusted
	aliases := authParameters.KnownAuthorityHosts
	if len(aliases) == 0 {
		metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo)
		if err != nil {
			return TokenResponse{}, err
		}
		aliases = metadata.Aliases
	}

	accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes)

	if account.IsZero() {
		return TokenResponse{
			AccessToken:  accessToken,
			RefreshToken: accesstokens.RefreshToken{},
			IDToken:      IDToken{},
			Account:      shared.Account{},
		}, nil
	}
	idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID)
	if err != nil {
		return TokenResponse{}, err
	}

	AppMetaData, err := m.readAppMetaData(aliases, clientID)
	if err != nil {
		return TokenResponse{}, err
	}
	familyID := AppMetaData.FamilyID

	refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID)
	if err != nil {
		return TokenResponse{}, err
	}
	account, err = m.readAccount(homeAccountID, aliases, realm)
	if err != nil {
		return TokenResponse{}, err
	}
	return TokenResponse{
		AccessToken:  accessToken,
		RefreshToken: refreshToken,
		IDToken:      idToken,
		Account:      account,
	}, nil
}
+
+const scopeSeparator = " "
+
// Write writes a token response to the cache and returns the account information the token is stored with.
// It stores, as present in tokenResponse: the refresh token, the access token
// (only if it passes Validate), the ID token plus a derived account, and the
// app metadata. The returned account is the zero value when tokenResponse
// carried no ID token.
func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) {
	authParameters.HomeAccountID = tokenResponse.ClientInfo.HomeAccountID()
	homeAccountID := authParameters.HomeAccountID
	environment := authParameters.AuthorityInfo.Host
	realm := authParameters.AuthorityInfo.Tenant
	clientID := authParameters.ClientID
	target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator)

	cachedAt := time.Now()

	var account shared.Account

	if len(tokenResponse.RefreshToken) > 0 {
		refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID)
		if err := m.writeRefreshToken(refreshToken); err != nil {
			return account, err
		}
	}

	if len(tokenResponse.AccessToken) > 0 {
		accessToken := NewAccessToken(
			homeAccountID,
			environment,
			realm,
			clientID,
			cachedAt,
			tokenResponse.ExpiresOn.T,
			tokenResponse.ExtExpiresOn.T,
			target,
			tokenResponse.AccessToken,
		)

		// Since we have a valid access token, cache it before moving on.
		// An invalid token is silently skipped rather than treated as an error.
		if err := accessToken.Validate(); err == nil {
			if err := m.writeAccessToken(accessToken); err != nil {
				return account, err
			}
		}
	}

	idTokenJwt := tokenResponse.IDToken
	if !idTokenJwt.IsZero() {
		idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken)
		if err := m.writeIDToken(idToken); err != nil {
			return shared.Account{}, err
		}

		localAccountID := idTokenJwt.LocalAccountID()
		authorityType := authParameters.AuthorityInfo.AuthorityType

		account = shared.NewAccount(
			homeAccountID,
			environment,
			realm,
			localAccountID,
			authorityType,
			idTokenJwt.PreferredUsername,
		)
		if err := m.writeAccount(account); err != nil {
			return shared.Account{}, err
		}
	}

	AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment)

	if err := m.writeAppMetaData(AppMetaData); err != nil {
		return shared.Account{}, err
	}
	return account, nil
}
+
// getMetadataEntry returns instance discovery metadata for the authority,
// serving from the in-memory cache and falling back to a network lookup on a
// cache miss.
func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
	md, err := m.aadMetadataFromCache(ctx, authorityInfo)
	if err != nil {
		// not in the cache, retrieve it
		md, err = m.aadMetadata(ctx, authorityInfo)
	}
	return md, err
}
+
// aadMetadataFromCache returns cached instance discovery metadata for the
// authority's host, or an error on a cache miss. ctx is unused; it is kept
// for signature symmetry with aadMetadata.
func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
	m.aadCacheMu.RLock()
	defer m.aadCacheMu.RUnlock()
	metadata, ok := m.aadCache[authorityInfo.Host]
	if ok {
		return metadata, nil
	}
	return metadata, errors.New("not found")
}
+
// aadMetadata performs AAD instance discovery over the network and caches
// every returned metadata entry under each of its aliases. If discovery does
// not cover the requested host, a minimal self-referential entry is cached so
// subsequent lookups succeed. The write lock is held for the duration,
// including the network call, which also serializes concurrent discoveries.
func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
	m.aadCacheMu.Lock()
	defer m.aadCacheMu.Unlock()
	discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo)
	if err != nil {
		return authority.InstanceDiscoveryMetadata{}, err
	}

	for _, metadataEntry := range discoveryResponse.Metadata {
		for _, aliasedAuthority := range metadataEntry.Aliases {
			m.aadCache[aliasedAuthority] = metadataEntry
		}
	}
	if _, ok := m.aadCache[authorityInfo.Host]; !ok {
		m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{
			PreferredNetwork: authorityInfo.Host,
			PreferredCache:   authorityInfo.Host,
		}
	}
	return m.aadCache[authorityInfo.Host], nil
}
+
// readAccessToken returns the first cached access token matching homeID, realm
// and clientID, whose environment is one of envAliases and whose cached scopes
// include every requested scope. A miss returns the zero AccessToken.
func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string) AccessToken {
	m.contractMu.RLock()
	defer m.contractMu.RUnlock()
	// TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens.
	// this shows up as the dominating node in a profile. for real-world scenarios this likely isn't
	// an issue, however if it does become a problem then we know where to look.
	for _, at := range m.contract.AccessTokens {
		if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID {
			if checkAlias(at.Environment, envAliases) {
				if isMatchingScopes(scopes, at.Scopes) {
					return at
				}
			}
		}
	}
	return AccessToken{}
}
+
+func (m *Manager) writeAccessToken(accessToken AccessToken) error {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ key := accessToken.Key()
+ m.contract.AccessTokens[key] = accessToken
+ return nil
+}
+
// readRefreshToken returns a cached refresh token for homeID. When familyID is
// set (the app is, or may be, part of a token family) family tokens are
// preferred over client-specific tokens; otherwise the order is reversed.
func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) {
	byFamily := func(rt accesstokens.RefreshToken) bool {
		return matchFamilyRefreshToken(rt, homeID, envAliases)
	}
	byClient := func(rt accesstokens.RefreshToken) bool {
		return matchClientIDRefreshToken(rt, homeID, envAliases, clientID)
	}

	var matchers []func(rt accesstokens.RefreshToken) bool
	if familyID == "" {
		matchers = []func(rt accesstokens.RefreshToken) bool{
			byClient, byFamily,
		}
	} else {
		matchers = []func(rt accesstokens.RefreshToken) bool{
			byFamily, byClient,
		}
	}

	// TODO(keegan): All the tests here pass, but Bogdan says this is
	// more complicated. I'm opening an issue for this to have him
	// review the tests and suggest tests that would break this so
	// we can re-write against good tests. His comments as follow:
	// The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is.
	// The algorithm is:
	// If application is NOT part of the family, search by client_ID
	// If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response).
	// https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
	m.contractMu.RLock()
	defer m.contractMu.RUnlock()
	for _, matcher := range matchers {
		for _, rt := range m.contract.RefreshTokens {
			if matcher(rt) {
				return rt, nil
			}
		}
	}

	return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
}
+
// matchFamilyRefreshToken reports whether rt is a family refresh token (FRT)
// for homeID issued by an environment in envAliases.
func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool {
	return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != ""
}
+
// matchClientIDRefreshToken reports whether rt belongs to clientID and homeID
// and was issued by an environment in envAliases.
func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool {
	return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID
}
+
// writeRefreshToken stores refreshToken keyed by its Key(); tokens with the
// same key overwrite each other.
func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error {
	key := refreshToken.Key()
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	m.contract.RefreshTokens[key] = refreshToken
	return nil
}
+
// readIDToken returns the cached ID token matching homeID, realm and clientID
// whose environment is one of envAliases.
func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) {
	m.contractMu.RLock()
	defer m.contractMu.RUnlock()
	for _, idt := range m.contract.IDTokens {
		if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID {
			if checkAlias(idt.Environment, envAliases) {
				return idt, nil
			}
		}
	}
	return IDToken{}, fmt.Errorf("token not found")
}
+
// writeIDToken stores idToken keyed by its Key(); tokens with the same key
// overwrite each other.
func (m *Manager) writeIDToken(idToken IDToken) error {
	key := idToken.Key()
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	m.contract.IDTokens[key] = idToken
	return nil
}
+
// AllAccounts returns every account in the cache. The result is nil (not an
// empty slice) when the cache holds no accounts; iteration order is not
// deterministic (map iteration).
func (m *Manager) AllAccounts() []shared.Account {
	m.contractMu.RLock()
	defer m.contractMu.RUnlock()

	var accounts []shared.Account
	for _, v := range m.contract.Accounts {
		accounts = append(accounts, v)
	}

	return accounts
}
+
// Account returns the cached account with the given home account ID, or the
// zero Account when none matches.
func (m *Manager) Account(homeAccountID string) shared.Account {
	m.contractMu.RLock()
	defer m.contractMu.RUnlock()

	for _, v := range m.contract.Accounts {
		if v.HomeAccountID == homeAccountID {
			return v
		}
	}

	return shared.Account{}
}
+
// readAccount returns the cached account matching homeAccountID and realm
// whose environment is one of envAliases.
func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) {
	m.contractMu.RLock()
	defer m.contractMu.RUnlock()

	// You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
	// We only use a map because the storage contract shared between all language implementations says use a map.
	// We can't change that. The other is because the keys are made using a specific "env", but here we are allowing
	// a match in multiple envs (envAlias). That means we either need to hash each possible key and do the lookup
	// or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored
	// is really low (say 2). Each hash is more expensive than the entire iteration.
	for _, acc := range m.contract.Accounts {
		if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm {
			return acc, nil
		}
	}
	return shared.Account{}, fmt.Errorf("account not found")
}
+
// writeAccount stores account keyed by its Key(); accounts with the same key
// overwrite each other.
func (m *Manager) writeAccount(account shared.Account) error {
	key := account.Key()
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	m.contract.Accounts[key] = account
	return nil
}
+
// readAppMetaData returns the app metadata for clientID whose environment is
// one of envAliases.
func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) {
	m.contractMu.RLock()
	defer m.contractMu.RUnlock()

	for _, app := range m.contract.AppMetaData {
		if checkAlias(app.Environment, envAliases) && app.ClientID == clientID {
			return app, nil
		}
	}
	return AppMetaData{}, fmt.Errorf("not found")
}
+
// writeAppMetaData stores AppMetaData keyed by its Key(); entries with the
// same key overwrite each other.
func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error {
	key := AppMetaData.Key()
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	m.contract.AppMetaData[key] = AppMetaData
	return nil
}
+
// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account.
// Each credential type is removed under its own acquisition of the write lock,
// so the overall removal is not a single atomic operation.
func (m *Manager) RemoveAccount(account shared.Account, clientID string) {
	m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID)
	m.removeAccessTokens(account.HomeAccountID, account.Environment)
	m.removeIDTokens(account.HomeAccountID, account.Environment)
	m.removeAccounts(account.HomeAccountID, account.Environment)
}
+
// removeRefreshTokens deletes refresh tokens for the given account that belong
// to clientID or to any token family.
func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) {
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	for key, rt := range m.contract.RefreshTokens {
		// Check for RTs associated with the account.
		if rt.HomeAccountID == homeID && rt.Environment == env {
			// Do RT's app ownership check as a precaution, in case family apps
			// and 3rd-party apps share same token cache, although they should not.
			if rt.ClientID == clientID || rt.FamilyID != "" {
				delete(m.contract.RefreshTokens, key)
			}
		}
	}
}
+
// removeAccessTokens deletes all access tokens for the given account,
// regardless of which app they belong to (see rationale below).
func (m *Manager) removeAccessTokens(homeID string, env string) {
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	for key, at := range m.contract.AccessTokens {
		// Remove AT's associated with the account
		if at.HomeAccountID == homeID && at.Environment == env {
			// # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check.
			// It means ATs for other apps will also be removed, it is OK because:
			// non-family apps are not supposed to share token cache to begin with;
			// Even if it happens, we keep other app's RT already, so SSO still works.
			delete(m.contract.AccessTokens, key)
		}
	}
}
+
// removeIDTokens deletes all ID tokens for the given account.
func (m *Manager) removeIDTokens(homeID string, env string) {
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	for key, idt := range m.contract.IDTokens {
		// Remove ID tokens associated with the account.
		if idt.HomeAccountID == homeID && idt.Environment == env {
			delete(m.contract.IDTokens, key)
		}
	}
}
+
// removeAccounts deletes all account entries matching the given home account
// ID and environment.
func (m *Manager) removeAccounts(homeID string, env string) {
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	for key, acc := range m.contract.Accounts {
		// Remove the specified account.
		if acc.HomeAccountID == homeID && acc.Environment == env {
			delete(m.contract.Accounts, key)
		}
	}
}
+
// update replaces the internal cache object wholesale. This is for use in
// tests, other uses are not supported.
func (m *Manager) update(cache *Contract) {
	m.contractMu.Lock()
	defer m.contractMu.Unlock()
	m.contract = cache
}
+
+// Marshal implements cache.Marshaler.
+func (m *Manager) Marshal() ([]byte, error) {
+ return json.Marshal(m.contract)
+}
+
// Unmarshal implements cache.Unmarshaler.
// It replaces the entire in-memory contract with the decoded contents of b;
// on decode error the existing contract is left untouched.
func (m *Manager) Unmarshal(b []byte) error {
	m.contractMu.Lock()
	defer m.contractMu.Unlock()

	contract := NewContract()

	err := json.Unmarshal(b, contract)
	if err != nil {
		return err
	}

	m.contract = contract

	return nil
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json
new file mode 100644
index 00000000000..1d8181924d1
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/test_serialized_cache.json
@@ -0,0 +1,56 @@
+{
+ "Account": {
+ "uid.utid-login.windows.net-contoso": {
+ "username": "John Doe",
+ "local_account_id": "object1234",
+ "realm": "contoso",
+ "environment": "login.windows.net",
+ "home_account_id": "uid.utid",
+ "authority_type": "MSSTS"
+ }
+ },
+ "RefreshToken": {
+ "uid.utid-login.windows.net-refreshtoken-my_client_id--s2 s1 s3": {
+ "target": "s2 s1 s3",
+ "environment": "login.windows.net",
+ "credential_type": "RefreshToken",
+ "secret": "a refresh token",
+ "client_id": "my_client_id",
+ "home_account_id": "uid.utid"
+ }
+ },
+ "AccessToken": {
+ "an-entry": {
+ "foo": "bar"
+ },
+ "uid.utid-login.windows.net-accesstoken-my_client_id-contoso-s2 s1 s3": {
+ "environment": "login.windows.net",
+ "credential_type": "AccessToken",
+ "secret": "an access token",
+ "realm": "contoso",
+ "target": "s2 s1 s3",
+ "client_id": "my_client_id",
+ "cached_at": "1000",
+ "home_account_id": "uid.utid",
+ "extended_expires_on": "4600",
+ "expires_on": "4600"
+ }
+ },
+ "IdToken": {
+ "uid.utid-login.windows.net-idtoken-my_client_id-contoso-": {
+ "realm": "contoso",
+ "environment": "login.windows.net",
+ "credential_type": "IdToken",
+ "secret": "header.eyJvaWQiOiAib2JqZWN0MTIzNCIsICJwcmVmZXJyZWRfdXNlcm5hbWUiOiAiSm9obiBEb2UiLCAic3ViIjogInN1YiJ9.signature",
+ "client_id": "my_client_id",
+ "home_account_id": "uid.utid"
+ }
+ },
+ "unknownEntity": {"field1":"1","field2":"whats"},
+ "AppMetadata": {
+ "AppMetadata-login.windows.net-my_client_id": {
+ "environment": "login.windows.net",
+ "client_id": "my_client_id"
+ }
+ }
+ }
\ No newline at end of file
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
new file mode 100644
index 00000000000..7b673e3fe12
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
@@ -0,0 +1,34 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// package exported contains internal types that are re-exported from a public package
+package exported
+
// AssertionRequestOptions has the information required to generate a client
// assertion (e.g. a signed JWT presented in place of a client secret).
type AssertionRequestOptions struct {
	// ClientID identifies the application for which an assertion is requested. Used as the assertion's "iss" and "sub" claims.
	ClientID string

	// TokenEndpoint is the intended token endpoint. Used as the assertion's "aud" claim.
	TokenEndpoint string
}
+
// TokenProviderParameters is the authentication parameters passed to token
// providers supplied by applications.
type TokenProviderParameters struct {
	// Claims contains any additional claims requested for the token
	Claims string
	// CorrelationID of the authentication request
	CorrelationID string
	// Scopes requested for the token
	Scopes []string
	// TenantID identifies the tenant in which to authenticate
	TenantID string
}
+
// TokenProviderResult is the authentication result returned by custom token
// providers.
type TokenProviderResult struct {
	// AccessToken is the requested token
	AccessToken string
	// ExpiresInSeconds is the lifetime of the token in seconds
	ExpiresInSeconds int
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md
new file mode 100644
index 00000000000..09edb01b7e4
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md
@@ -0,0 +1,140 @@
+# JSON Package Design
+Author: John Doak(jdoak@microsoft.com)
+
+## Why?
+
+This project needs a special type of marshal/unmarshal not directly supported
+by the encoding/json package.
+
+The need revolves around a few key wants/needs:
+- unmarshal and marshal structs representing JSON messages
+- fields in the message not in the struct must be maintained when unmarshalled
+- those same fields must be marshalled back when encoded again
+
+The initial version used map[string]interface{} to put in the keys that
+were known and then any other keys were put into a field called AdditionalFields.
+
+This has a few negatives:
+- Dual marshaling/unmarshalling is required
+- Adding a struct field requires manually adding a key by name to be encoded/decoded from the map (which is a loosely coupled construct), which can lead to bugs that aren't detected or have bad side effects
+- Tests can become quickly disconnected if those keys aren't put
+in tests as well. So you think you have support working, but you
+don't. Existing tests were found that didn't test the marshalling output.
+- There is no enforcement that if AdditionalFields is required on one struct, it should be on all containers
+that don't have custom marshal/unmarshal.
+
+This package aims to support our needs by providing custom Marshal()/Unmarshal() functions.
+
+This prevents all the negatives in the initial solution listed above. However, it does add its own negative:
+- Custom encoding/decoding via reflection is messy (as can be seen in encoding/json itself)
+
+Go proverb: Reflection is never clear
+Suggested reading: https://blog.golang.org/laws-of-reflection
+
+## Important design decisions
+
+- We don't want to understand all JSON decoding rules
+- We don't want to deal with all the quoting, commas, etc on decode
+- Need support for json.Marshaler/Unmarshaler, so we can support types like time.Time
+- If struct does not implement json.Unmarshaler, it must have AdditionalFields defined
+- We only support root level objects that are \*struct or struct
+
+To facilitate these goals, we will utilize the json.Encoder and json.Decoder.
+They provide streaming processing (efficient) and return errors on bad JSON.
+
+Support for json.Marshaler/Unmarshaler allows for us to use non-basic types
+that must be specially encoded/decoded (like time.Time objects).
+
+We don't support types that neither implement custom unmarshalling nor have
+AdditionalFields, in order to prevent future devs from forgetting that
+important field and generating bad return values.
+
+Support for root level objects of \*struct or struct simply acknowledges the
+fact that this is designed only for the purposes listed in the Introduction.
+Outside that (like encoding a lone number) should be done with the
+regular json package (as it will not have additional fields).
+
+We don't support a few things on json supported reference types and structs:
+- \*map: no need for pointers to maps
+- \*slice: no need for pointers to slices
+- any further pointers on struct after \*struct
+
+There should never be a need for this in Go.
+
+## Design
+
+## State Machines
+
+This uses state machine designs based upon the Rob Pike talk on
+lexers and parsers: https://www.youtube.com/watch?v=HxaD_trXwRE
+
+This is the most common pattern for state machines in Go and
+the model to follow closely when dealing with streaming
+processing of textual data.
+
+Our state machines are based on the type:
+```go
+type stateFn func() (stateFn, error)
+```
+
+The state machine itself is simply a struct that has methods that
+satisfy stateFn.
+
+Our state machines have a few standard calls
+- run(): runs the state machine
+- start(): always the first stateFn to be called
+
+All state machines have the following logic:
+* run() is called
+* start() is called and returns the next stateFn or error
+* stateFn is called
+ - If returned stateFn(next state) is non-nil, call it
+ - If error is non-nil, run() returns the error
+ - If stateFn == nil and err == nil, run() return err == nil
+
+## Supporting types
+
+Marshalling/Unmarshalling must support(within top level struct):
+- struct
+- \*struct
+- []struct
+- []\*struct
+- []map[string]structContainer
+- [][]structContainer
+
+**Term note:** structContainer == type that has a struct or \*struct inside it
+
+We specifically do not support []interface or map[string]interface
+where the interface value would hold some value with a struct in it.
+
+Those will still marshal/unmarshal, but without support for
+AdditionalFields.
+
+## Marshalling
+
+The marshalling design will be based around a state machine design.
+
+The basic logic is as follows:
+
+* If struct has custom marshaller, call it and return
+* If struct has field "AdditionalFields", it must be a map[string]interface{}
+* If struct does not have "AdditionalFields", give an error
+* Get struct tag detailing json names to go names, create mapping
+* For each public field name
+ - Write field name out
+ - If field value is a struct, recursively call our state machine
+ - Otherwise, use the json.Encoder to write out the value
+
+## Unmarshalling
+
+The unmarshalling design is also based around a state machine design. The
+basic logic is as follows:
+
+* If struct has custom unmarshaller, call it
+* If struct has field "AdditionalFields", it must be a map[string]interface{}
+* Get struct tag detailing json names to go names, create mapping
+* For each key found
+ - If key exists,
+ - If value is basic type, extract value into struct field using Decoder
+ - If value is struct type, recursively call statemachine
+ - If key doesn't exist, add it to AdditionalFields if it exists using Decoder
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
new file mode 100644
index 00000000000..2238521f5f9
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
@@ -0,0 +1,184 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package json provides functions for marshalling and unmarshalling types to JSON. These functions are meant to
+// be utilized inside of structs that implement json.Unmarshaler and json.Marshaler interfaces.
+// This package provides the additional functionality of writing fields that are not in the struct when marshalling
+// to a field called AdditionalFields if that field exists and is a map[string]interface{}.
+// When marshalling, if the struct has all the same prerequisites, it will use the keys in AdditionalFields as
+// extra fields. This package uses encoding/json underneath.
+package json
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+const addField = "AdditionalFields"
+const (
+ marshalJSON = "MarshalJSON"
+ unmarshalJSON = "UnmarshalJSON"
+)
+
+var (
+ leftBrace = []byte("{")[0]
+ rightBrace = []byte("}")[0]
+ comma = []byte(",")[0]
+ leftParen = []byte("[")[0]
+ rightParen = []byte("]")[0]
+)
+
+var mapStrInterType = reflect.TypeOf(map[string]interface{}{})
+
+// stateFn defines a state machine function. This will be used in all state
+// machines in this package.
+type stateFn func() (stateFn, error)
+
+// Marshal is used to marshal a type into its JSON representation. It
+// wraps the stdlib calls in order to marshal a struct or *struct so
+// that a field called "AdditionalFields" of type map[string]interface{}
+// with "-" used inside struct tag `json:"-"` can be marshalled as if
+// they were fields within the struct.
+func Marshal(i interface{}) ([]byte, error) {
+ buff := bytes.Buffer{}
+ enc := json.NewEncoder(&buff)
+ enc.SetEscapeHTML(false)
+ enc.SetIndent("", "")
+
+ v := reflect.ValueOf(i)
+ if v.Kind() != reflect.Ptr && v.CanAddr() {
+ v = v.Addr()
+ }
+ err := marshalStruct(v, &buff, enc)
+ if err != nil {
+ return nil, err
+ }
+ return buff.Bytes(), nil
+}
+
+// Unmarshal unmarshals a []byte representing JSON into i, which must be a *struct. In addition, if the struct has
+// a field called AdditionalFields of type map[string]interface{}, JSON data representing fields not in the struct
+// will be written as key/value pairs to AdditionalFields.
+func Unmarshal(b []byte, i interface{}) error {
+ if len(b) == 0 {
+ return nil
+ }
+
+ jdec := json.NewDecoder(bytes.NewBuffer(b))
+ jdec.UseNumber()
+ return unmarshalStruct(jdec, i)
+}
+
+// MarshalRaw marshals i into a json.RawMessage. If i cannot be marshalled,
+// this will panic. This is exposed to help test AdditionalField values
+// which are stored as json.RawMessage.
+func MarshalRaw(i interface{}) json.RawMessage {
+ b, err := json.Marshal(i)
+ if err != nil {
+ panic(err)
+ }
+ return json.RawMessage(b)
+}
+
+// isDelim simply tests to see if a json.Token is a delimiter.
+func isDelim(got json.Token) bool {
+ switch got.(type) {
+ case json.Delim:
+ return true
+ }
+ return false
+}
+
+// delimIs tests got to see if it is want.
+func delimIs(got json.Token, want rune) bool {
+ switch v := got.(type) {
+ case json.Delim:
+ if v == json.Delim(want) {
+ return true
+ }
+ }
+ return false
+}
+
+// hasMarshalJSON will determine if the value or a pointer to this value has
+// the MarshalJSON method.
+func hasMarshalJSON(v reflect.Value) bool {
+ if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
+ _, ok := v.Interface().(json.Marshaler)
+ return ok
+ }
+
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ } else {
+ if !v.CanAddr() {
+ return false
+ }
+ v = v.Addr()
+ }
+
+ if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
+ _, ok := v.Interface().(json.Marshaler)
+ return ok
+ }
+ return false
+}
+
+// callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value.
+// This will panic if the method is not defined.
+func callMarshalJSON(v reflect.Value) ([]byte, error) {
+ if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
+ marsh := v.Interface().(json.Marshaler)
+ return marsh.MarshalJSON()
+ }
+
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ } else {
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+ }
+
+ if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid {
+ marsh := v.Interface().(json.Marshaler)
+ return marsh.MarshalJSON()
+ }
+
+ panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface()))
+}
+
+// hasUnmarshalJSON will determine if the value or a pointer to this value has
+// the UnmarshalJSON method.
+func hasUnmarshalJSON(v reflect.Value) bool {
+ // You can't unmarshal on a non-pointer type.
+ if v.Kind() != reflect.Ptr {
+ if !v.CanAddr() {
+ return false
+ }
+ v = v.Addr()
+ }
+
+ if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid {
+ _, ok := v.Interface().(json.Unmarshaler)
+ return ok
+ }
+
+ return false
+}
+
+// hasOmitEmpty indicates if the field has instructed us to not output
+// the field if omitempty is set on the tag. tag is the string
+// returned by reflect.StructField.Tag().Get().
+func hasOmitEmpty(tag string) bool {
+ sl := strings.Split(tag, ",")
+ for _, str := range sl {
+ if str == "omitempty" {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go
new file mode 100644
index 00000000000..cef442f25c8
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go
@@ -0,0 +1,333 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package json
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+)
+
+// unmarshalMap unmarshals a map.
+func unmarshalMap(dec *json.Decoder, m reflect.Value) error {
+ if m.Kind() != reflect.Ptr || m.Elem().Kind() != reflect.Map {
+ panic("unmarshalMap called on non-*map value")
+ }
+ mapValueType := m.Elem().Type().Elem()
+ walk := mapWalk{dec: dec, m: m, valueType: mapValueType}
+ if err := walk.run(); err != nil {
+ return err
+ }
+ return nil
+}
+
+type mapWalk struct {
+ dec *json.Decoder
+ key string
+ m reflect.Value
+ valueType reflect.Type
+}
+
+// run runs our decoder state machine.
+func (m *mapWalk) run() error {
+ var state = m.start
+ var err error
+ for {
+ state, err = state()
+ if err != nil {
+ return err
+ }
+ if state == nil {
+ return nil
+ }
+ }
+}
+
+func (m *mapWalk) start() (stateFn, error) {
+ // maps can have custom unmarshaler's.
+ if hasUnmarshalJSON(m.m) {
+ err := m.dec.Decode(m.m.Interface())
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+ }
+
+ // We only want to use this if the map value is:
+ // *struct/struct/map/slice
+ // otherwise use standard decode
+ t, _ := m.valueBaseType()
+ switch t.Kind() {
+ case reflect.Struct, reflect.Map, reflect.Slice:
+ delim, err := m.dec.Token()
+ if err != nil {
+ return nil, err
+ }
+ // This indicates the value was set to JSON null.
+ if delim == nil {
+ return nil, nil
+ }
+ if !delimIs(delim, '{') {
+ return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim)
+ }
+ return m.next, nil
+ case reflect.Ptr:
+ return nil, fmt.Errorf("do not support maps with values of '**type' or '*reference")
+ }
+
+ // This is a basic map type, so just use Decode().
+ if err := m.dec.Decode(m.m.Interface()); err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+}
+
+func (m *mapWalk) next() (stateFn, error) {
+ if m.dec.More() {
+ key, err := m.dec.Token()
+ if err != nil {
+ return nil, err
+ }
+ m.key = key.(string)
+ return m.storeValue, nil
+ }
+ // No more entries, so remove final }.
+ _, err := m.dec.Token()
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func (m *mapWalk) storeValue() (stateFn, error) {
+ v := m.valueType
+ for {
+ switch v.Kind() {
+ case reflect.Ptr:
+ v = v.Elem()
+ continue
+ case reflect.Struct:
+ return m.storeStruct, nil
+ case reflect.Map:
+ return m.storeMap, nil
+ case reflect.Slice:
+ return m.storeSlice, nil
+ }
+ return nil, fmt.Errorf("bug: mapWalk.storeValue() called on unsupported type: %v", v.Kind())
+ }
+}
+
+func (m *mapWalk) storeStruct() (stateFn, error) {
+ v := newValue(m.valueType)
+ if err := unmarshalStruct(m.dec, v.Interface()); err != nil {
+ return nil, err
+ }
+
+ if m.valueType.Kind() == reflect.Ptr {
+ m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v)
+ return m.next, nil
+ }
+ m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem())
+
+ return m.next, nil
+}
+
+func (m *mapWalk) storeMap() (stateFn, error) {
+ v := reflect.MakeMap(m.valueType)
+ ptr := newValue(v.Type())
+ ptr.Elem().Set(v)
+ if err := unmarshalMap(m.dec, ptr); err != nil {
+ return nil, err
+ }
+
+ m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v)
+
+ return m.next, nil
+}
+
+func (m *mapWalk) storeSlice() (stateFn, error) {
+ v := newValue(m.valueType)
+ if err := unmarshalSlice(m.dec, v); err != nil {
+ return nil, err
+ }
+
+ m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem())
+
+ return m.next, nil
+}
+
+// valueBaseType returns the underlying Type. So a *struct would yield
+// struct, etc...
+func (m *mapWalk) valueBaseType() (reflect.Type, bool) {
+ ptr := false
+ v := m.valueType
+ if v.Kind() == reflect.Ptr {
+ ptr = true
+ v = v.Elem()
+ }
+ return v, ptr
+}
+
+// unmarshalSlice unmarshals the next value, which must be a slice, into
+// ptrSlice, which must be a pointer to a slice. newValue() can be used to
+// create the slice.
+func unmarshalSlice(dec *json.Decoder, ptrSlice reflect.Value) error {
+ if ptrSlice.Kind() != reflect.Ptr || ptrSlice.Elem().Kind() != reflect.Slice {
+ panic("unmarshalSlice called on non-*[]slice value")
+ }
+ sliceValueType := ptrSlice.Elem().Type().Elem()
+ walk := sliceWalk{
+ dec: dec,
+ s: ptrSlice,
+ valueType: sliceValueType,
+ }
+ if err := walk.run(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type sliceWalk struct {
+ dec *json.Decoder
+ s reflect.Value // *[]slice
+ valueType reflect.Type
+}
+
+// run runs our decoder state machine.
+func (s *sliceWalk) run() error {
+ var state = s.start
+ var err error
+ for {
+ state, err = state()
+ if err != nil {
+ return err
+ }
+ if state == nil {
+ return nil
+ }
+ }
+}
+
+func (s *sliceWalk) start() (stateFn, error) {
+ // slices can have custom unmarshaler's.
+ if hasUnmarshalJSON(s.s) {
+ err := s.dec.Decode(s.s.Interface())
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+ }
+
+ // We only want to use this if the slice value is:
+ // []*struct/[]struct/[]map/[]slice
+ // otherwise use standard decode
+ t := s.valueBaseType()
+
+ switch t.Kind() {
+ case reflect.Ptr:
+ return nil, fmt.Errorf("cannot unmarshal into a ** or *")
+ case reflect.Struct, reflect.Map, reflect.Slice:
+ delim, err := s.dec.Token()
+ if err != nil {
+ return nil, err
+ }
+ // This indicates the value was set to nil.
+ if delim == nil {
+ return nil, nil
+ }
+ if !delimIs(delim, '[') {
+ return nil, fmt.Errorf("Unmarshal expected opening [, received %v", delim)
+ }
+ return s.next, nil
+ }
+
+ if err := s.dec.Decode(s.s.Interface()); err != nil {
+ return nil, err
+ }
+ return nil, nil
+}
+
+func (s *sliceWalk) next() (stateFn, error) {
+ if s.dec.More() {
+ return s.storeValue, nil
+ }
+ // Nothing left in the slice, remove closing ]
+ _, err := s.dec.Token()
+ return nil, err
+}
+
+func (s *sliceWalk) storeValue() (stateFn, error) {
+ t := s.valueBaseType()
+ switch t.Kind() {
+ case reflect.Ptr:
+ return nil, fmt.Errorf("do not support 'pointer to pointer' or 'pointer to reference' types")
+ case reflect.Struct:
+ return s.storeStruct, nil
+ case reflect.Map:
+ return s.storeMap, nil
+ case reflect.Slice:
+ return s.storeSlice, nil
+ }
+ return nil, fmt.Errorf("bug: sliceWalk.storeValue() called on unsupported type: %v", t.Kind())
+}
+
+func (s *sliceWalk) storeStruct() (stateFn, error) {
+ v := newValue(s.valueType)
+ if err := unmarshalStruct(s.dec, v.Interface()); err != nil {
+ return nil, err
+ }
+
+ if s.valueType.Kind() == reflect.Ptr {
+ s.s.Elem().Set(reflect.Append(s.s.Elem(), v))
+ return s.next, nil
+ }
+
+ s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem()))
+ return s.next, nil
+}
+
+func (s *sliceWalk) storeMap() (stateFn, error) {
+ v := reflect.MakeMap(s.valueType)
+ ptr := newValue(v.Type())
+ ptr.Elem().Set(v)
+
+ if err := unmarshalMap(s.dec, ptr); err != nil {
+ return nil, err
+ }
+
+ s.s.Elem().Set(reflect.Append(s.s.Elem(), v))
+
+ return s.next, nil
+}
+
+func (s *sliceWalk) storeSlice() (stateFn, error) {
+ v := newValue(s.valueType)
+ if err := unmarshalSlice(s.dec, v); err != nil {
+ return nil, err
+ }
+
+ s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem()))
+
+ return s.next, nil
+}
+
+// valueBaseType returns the underlying Type. So a *struct would yield
+// struct, etc...
+func (s *sliceWalk) valueBaseType() reflect.Type {
+ v := s.valueType
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ return v
+}
+
+// newValue() returns a new *type that represents type passed.
+func newValue(valueType reflect.Type) reflect.Value {
+ if valueType.Kind() == reflect.Ptr {
+ return reflect.New(valueType.Elem())
+ }
+ return reflect.New(valueType)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go
new file mode 100644
index 00000000000..df5dc6e11b5
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go
@@ -0,0 +1,346 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package json
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "unicode"
+)
+
+// marshalStruct takes in i, which must be a *struct or struct and marshals its content
+// as JSON into buff (sometimes with writes to buff directly, sometimes via enc).
+// This call is recursive for all fields of *struct or struct type.
+func marshalStruct(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ // We only care about custom Marshalling a struct.
+ if v.Kind() != reflect.Struct {
+ return fmt.Errorf("bug: marshal() received a non *struct or struct, received type %T", v.Interface())
+ }
+
+ if hasMarshalJSON(v) {
+ b, err := callMarshalJSON(v)
+ if err != nil {
+ return err
+ }
+ buff.Write(b)
+ return nil
+ }
+
+ t := v.Type()
+
+ // If it has an AdditionalFields field make sure its the right type.
+ f := v.FieldByName(addField)
+ if f.Kind() != reflect.Invalid {
+ if f.Kind() != reflect.Map {
+ return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface())
+ }
+ if !f.Type().AssignableTo(mapStrInterType) {
+ return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface())
+ }
+ }
+
+ translator, err := findFields(v)
+ if err != nil {
+ return err
+ }
+
+ buff.WriteByte(leftBrace)
+ for x := 0; x < v.NumField(); x++ {
+ field := v.Field(x)
+
+ // We don't access private fields.
+ if unicode.IsLower(rune(t.Field(x).Name[0])) {
+ continue
+ }
+
+ if t.Field(x).Name == addField {
+ if v.Field(x).Len() > 0 {
+ if err := writeAddFields(field.Interface(), buff, enc); err != nil {
+ return err
+ }
+ buff.WriteByte(comma)
+ }
+ continue
+ }
+
+ // If they have omitempty set, we don't write out the field if
+ // it is the zero value.
+ if hasOmitEmpty(t.Field(x).Tag.Get("json")) {
+ if v.Field(x).IsZero() {
+ continue
+ }
+ }
+
+ // Write out the field name part.
+ jsonName := translator.jsonName(t.Field(x).Name)
+ buff.WriteString(fmt.Sprintf("%q:", jsonName))
+
+ if field.Kind() == reflect.Ptr {
+ field = field.Elem()
+ }
+
+ if err := marshalStructField(field, buff, enc); err != nil {
+ return err
+ }
+ }
+
+ buff.Truncate(buff.Len() - 1) // Remove final comma
+ buff.WriteByte(rightBrace)
+
+ return nil
+}
+
+func marshalStructField(field reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error {
+ // Determine if we need a trailing comma.
+ defer buff.WriteByte(comma)
+
+ switch field.Kind() {
+ // If it was a *struct or struct, we need to recursively all marshal().
+ case reflect.Struct:
+ if field.CanAddr() {
+ field = field.Addr()
+ }
+ return marshalStruct(field, buff, enc)
+ case reflect.Map:
+ return marshalMap(field, buff, enc)
+ case reflect.Slice:
+ return marshalSlice(field, buff, enc)
+ }
+
+ // It is just a basic type, so encode it.
+ if err := enc.Encode(field.Interface()); err != nil {
+ return err
+ }
+ buff.Truncate(buff.Len() - 1) // Remove Encode() added \n
+
+ return nil
+}
+
+func marshalMap(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error {
+ if v.Kind() != reflect.Map {
+ return fmt.Errorf("bug: marshalMap() called on %T", v.Interface())
+ }
+ if v.Len() == 0 {
+ buff.WriteByte(leftBrace)
+ buff.WriteByte(rightBrace)
+ return nil
+ }
+ encoder := mapEncode{m: v, buff: buff, enc: enc}
+ return encoder.run()
+}
+
+type mapEncode struct {
+ m reflect.Value
+ buff *bytes.Buffer
+ enc *json.Encoder
+
+ valueBaseType reflect.Type
+}
+
+// run runs our encoder state machine.
+func (m *mapEncode) run() error {
+ var state = m.start
+ var err error
+ for {
+ state, err = state()
+ if err != nil {
+ return err
+ }
+ if state == nil {
+ return nil
+ }
+ }
+}
+
+func (m *mapEncode) start() (stateFn, error) {
+ if hasMarshalJSON(m.m) {
+ b, err := callMarshalJSON(m.m)
+ if err != nil {
+ return nil, err
+ }
+ m.buff.Write(b)
+ return nil, nil
+ }
+
+ valueBaseType := m.m.Type().Elem()
+ if valueBaseType.Kind() == reflect.Ptr {
+ valueBaseType = valueBaseType.Elem()
+ }
+ m.valueBaseType = valueBaseType
+
+ switch valueBaseType.Kind() {
+ case reflect.Ptr:
+ return nil, fmt.Errorf("Marshal does not support ** or *")
+ case reflect.Struct, reflect.Map, reflect.Slice:
+ return m.encode, nil
+ }
+
+ // If the map value doesn't have a struct/map/slice, just Encode() it.
+ if err := m.enc.Encode(m.m.Interface()); err != nil {
+ return nil, err
+ }
+ m.buff.Truncate(m.buff.Len() - 1) // Remove Encode() added \n
+ return nil, nil
+}
+
+func (m *mapEncode) encode() (stateFn, error) {
+ m.buff.WriteByte(leftBrace)
+
+ iter := m.m.MapRange()
+ for iter.Next() {
+ // Write the key.
+ k := iter.Key()
+ m.buff.WriteString(fmt.Sprintf("%q:", k.String()))
+
+ v := iter.Value()
+ switch m.valueBaseType.Kind() {
+ case reflect.Struct:
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+ if err := marshalStruct(v, m.buff, m.enc); err != nil {
+ return nil, err
+ }
+ case reflect.Map:
+ if err := marshalMap(v, m.buff, m.enc); err != nil {
+ return nil, err
+ }
+ case reflect.Slice:
+ if err := marshalSlice(v, m.buff, m.enc); err != nil {
+ return nil, err
+ }
+ default:
+ panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", m.valueBaseType.Kind()))
+ }
+ m.buff.WriteByte(comma)
+ }
+ m.buff.Truncate(m.buff.Len() - 1) // Remove final comma
+ m.buff.WriteByte(rightBrace)
+
+ return nil, nil
+}
+
+func marshalSlice(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error {
+ if v.Kind() != reflect.Slice {
+ return fmt.Errorf("bug: marshalSlice() called on %T", v.Interface())
+ }
+ if v.Len() == 0 {
+ buff.WriteByte(leftParen)
+ buff.WriteByte(rightParen)
+ return nil
+ }
+ encoder := sliceEncode{s: v, buff: buff, enc: enc}
+ return encoder.run()
+}
+
+type sliceEncode struct {
+ s reflect.Value
+ buff *bytes.Buffer
+ enc *json.Encoder
+
+ valueBaseType reflect.Type
+}
+
+// run runs our encoder state machine.
+func (s *sliceEncode) run() error {
+ var state = s.start
+ var err error
+ for {
+ state, err = state()
+ if err != nil {
+ return err
+ }
+ if state == nil {
+ return nil
+ }
+ }
+}
+
+func (s *sliceEncode) start() (stateFn, error) {
+ if hasMarshalJSON(s.s) {
+ b, err := callMarshalJSON(s.s)
+ if err != nil {
+ return nil, err
+ }
+ s.buff.Write(b)
+ return nil, nil
+ }
+
+ valueBaseType := s.s.Type().Elem()
+ if valueBaseType.Kind() == reflect.Ptr {
+ valueBaseType = valueBaseType.Elem()
+ }
+ s.valueBaseType = valueBaseType
+
+ switch valueBaseType.Kind() {
+ case reflect.Ptr:
+ return nil, fmt.Errorf("Marshal does not support ** or *")
+ case reflect.Struct, reflect.Map, reflect.Slice:
+ return s.encode, nil
+ }
+
+	// If the slice value doesn't have a struct/map/slice, just Encode() it.
+ if err := s.enc.Encode(s.s.Interface()); err != nil {
+ return nil, err
+ }
+ s.buff.Truncate(s.buff.Len() - 1) // Remove Encode added \n
+
+ return nil, nil
+}
+
+func (s *sliceEncode) encode() (stateFn, error) {
+ s.buff.WriteByte(leftParen)
+ for i := 0; i < s.s.Len(); i++ {
+ v := s.s.Index(i)
+ switch s.valueBaseType.Kind() {
+ case reflect.Struct:
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+ if err := marshalStruct(v, s.buff, s.enc); err != nil {
+ return nil, err
+ }
+ case reflect.Map:
+ if err := marshalMap(v, s.buff, s.enc); err != nil {
+ return nil, err
+ }
+ case reflect.Slice:
+ if err := marshalSlice(v, s.buff, s.enc); err != nil {
+ return nil, err
+ }
+ default:
+ panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", s.valueBaseType.Kind()))
+ }
+ s.buff.WriteByte(comma)
+ }
+ s.buff.Truncate(s.buff.Len() - 1) // Remove final comma
+ s.buff.WriteByte(rightParen)
+ return nil, nil
+}
+
+// writeAddFields writes the AdditionalFields struct field out to JSON as field
+// values. i must be a map[string]interface{} or this will panic.
+func writeAddFields(i interface{}, buff *bytes.Buffer, enc *json.Encoder) error {
+ m := i.(map[string]interface{})
+
+ x := 0
+ for k, v := range m {
+ buff.WriteString(fmt.Sprintf("%q:", k))
+ if err := enc.Encode(v); err != nil {
+ return err
+ }
+ buff.Truncate(buff.Len() - 1) // Remove Encode() added \n
+
+ if x+1 != len(m) {
+ buff.WriteByte(comma)
+ }
+ x++
+ }
+ return nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go
new file mode 100644
index 00000000000..07751544a28
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go
@@ -0,0 +1,290 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package json
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+func unmarshalStruct(jdec *json.Decoder, i interface{}) error {
+ v := reflect.ValueOf(i)
+ if v.Kind() != reflect.Ptr {
+ return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i)
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i)
+ }
+
+ if hasUnmarshalJSON(v) {
+ // Indicates that this type has a custom Unmarshaler.
+ return jdec.Decode(v.Addr().Interface())
+ }
+
+ f := v.FieldByName(addField)
+ if f.Kind() == reflect.Invalid {
+ return fmt.Errorf("Unmarshal(%T) only supports structs that have the field AdditionalFields or implements json.Unmarshaler", i)
+ }
+
+ if f.Kind() != reflect.Map || !f.Type().AssignableTo(mapStrInterType) {
+ return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", i)
+ }
+
+ dec := newDecoder(jdec, v)
+ return dec.run()
+}
+
+type decoder struct {
+ dec *json.Decoder
+ value reflect.Value // This will be a reflect.Struct
+ translator translateFields
+ key string
+}
+
+func newDecoder(dec *json.Decoder, value reflect.Value) *decoder {
+ return &decoder{value: value, dec: dec}
+}
+
+// run runs our decoder state machine.
+func (d *decoder) run() error {
+ var state = d.start
+ var err error
+ for {
+ state, err = state()
+ if err != nil {
+ return err
+ }
+ if state == nil {
+ return nil
+ }
+ }
+}
+
+// start looks for our opening delimiter '{' and then transitions to looping through our fields.
+func (d *decoder) start() (stateFn, error) {
+ var err error
+ d.translator, err = findFields(d.value)
+ if err != nil {
+ return nil, err
+ }
+
+ delim, err := d.dec.Token()
+ if err != nil {
+ return nil, err
+ }
+ if !delimIs(delim, '{') {
+ return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim)
+ }
+
+ return d.next, nil
+}
+
+// next gets the next struct field name from the raw json or stops the machine if we get our closing }.
+func (d *decoder) next() (stateFn, error) {
+ if !d.dec.More() {
+ // Remove the closing }.
+ if _, err := d.dec.Token(); err != nil {
+ return nil, err
+ }
+ return nil, nil
+ }
+
+ key, err := d.dec.Token()
+ if err != nil {
+ return nil, err
+ }
+
+ d.key = key.(string)
+ return d.storeValue, nil
+}
+
+// storeValue takes the next value and stores it our struct. If the field can't be found
+// in the struct, it pushes the operation to storeAdditional().
+func (d *decoder) storeValue() (stateFn, error) {
+ goName := d.translator.goName(d.key)
+ if goName == "" {
+ goName = d.key
+ }
+
+ // We don't have the field in the struct, so it goes in AdditionalFields.
+ f := d.value.FieldByName(goName)
+ if f.Kind() == reflect.Invalid {
+ return d.storeAdditional, nil
+ }
+
+ // Indicates that this type has a custom Unmarshaler.
+ if hasUnmarshalJSON(f) {
+ err := d.dec.Decode(f.Addr().Interface())
+ if err != nil {
+ return nil, err
+ }
+ return d.next, nil
+ }
+
+ t, isPtr, err := fieldBaseType(d.value, goName)
+ if err != nil {
+ return nil, fmt.Errorf("type(%s) had field(%s) %w", d.value.Type().Name(), goName, err)
+ }
+
+ switch t.Kind() {
+ // We need to recursively call ourselves on any *struct or struct.
+ case reflect.Struct:
+ if isPtr {
+ if f.IsNil() {
+ f.Set(reflect.New(t))
+ }
+ } else {
+ f = f.Addr()
+ }
+ if err := unmarshalStruct(d.dec, f.Interface()); err != nil {
+ return nil, err
+ }
+ return d.next, nil
+ case reflect.Map:
+ v := reflect.MakeMap(f.Type())
+ ptr := newValue(f.Type())
+ ptr.Elem().Set(v)
+ if err := unmarshalMap(d.dec, ptr); err != nil {
+ return nil, err
+ }
+ f.Set(ptr.Elem())
+ return d.next, nil
+ case reflect.Slice:
+ v := reflect.MakeSlice(f.Type(), 0, 0)
+ ptr := newValue(f.Type())
+ ptr.Elem().Set(v)
+ if err := unmarshalSlice(d.dec, ptr); err != nil {
+ return nil, err
+ }
+ f.Set(ptr.Elem())
+ return d.next, nil
+ }
+
+ if !isPtr {
+ f = f.Addr()
+ }
+
+ // For values that are pointers, we need them to be non-nil in order
+ // to decode into them.
+ if f.IsNil() {
+ f.Set(reflect.New(t))
+ }
+
+ if err := d.dec.Decode(f.Interface()); err != nil {
+ return nil, err
+ }
+
+ return d.next, nil
+}
+
+// storeAdditional pushes the key/value into our .AdditionalFields map.
+func (d *decoder) storeAdditional() (stateFn, error) {
+ rw := json.RawMessage{}
+ if err := d.dec.Decode(&rw); err != nil {
+ return nil, err
+ }
+ field := d.value.FieldByName(addField)
+ if field.IsNil() {
+ field.Set(reflect.MakeMap(field.Type()))
+ }
+ field.SetMapIndex(reflect.ValueOf(d.key), reflect.ValueOf(rw))
+ return d.next, nil
+}
+
+func fieldBaseType(v reflect.Value, fieldName string) (t reflect.Type, isPtr bool, err error) {
+ sf, ok := v.Type().FieldByName(fieldName)
+ if !ok {
+ return nil, false, fmt.Errorf("bug: fieldBaseType() lookup of field(%s) on type(%s): do not have field", fieldName, v.Type().Name())
+ }
+ t = sf.Type
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ isPtr = true
+ }
+ if t.Kind() == reflect.Ptr {
+ return nil, isPtr, fmt.Errorf("received pointer to pointer type, not supported")
+ }
+ return t, isPtr, nil
+}
+
+type translateField struct {
+ jsonName string
+ goName string
+}
+
+// translateFields is a list of translateFields with a handy lookup method.
+type translateFields []translateField
+
+// goName loops through a list of fields looking for one containing the jsonName and
+// returning the goName. If not found, returns the empty string.
+// Note: not a map because at this size slices are faster even in tight loops.
+func (t translateFields) goName(jsonName string) string {
+ for _, entry := range t {
+ if entry.jsonName == jsonName {
+ return entry.goName
+ }
+ }
+ return ""
+}
+
+// jsonName loops through a list of fields looking for one containing the goName and
+// returning the jsonName. If not found, returns the empty string.
+// Note: not a map because at this size slices are faster even in tight loops.
+func (t translateFields) jsonName(goName string) string {
+ for _, entry := range t {
+ if entry.goName == goName {
+ return entry.jsonName
+ }
+ }
+ return ""
+}
+
+var umarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
+
+// findFields parses a struct and writes the field tags for lookup. It will return an error
+// if any field has a type of *struct or struct that does not implement json.Marshaler.
+func findFields(v reflect.Value) (translateFields, error) {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if v.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("findFields received a %s type, expected *struct or struct", v.Type().Name())
+ }
+ tfs := make([]translateField, 0, v.NumField())
+ for i := 0; i < v.NumField(); i++ {
+ tf := translateField{
+ goName: v.Type().Field(i).Name,
+ jsonName: parseTag(v.Type().Field(i).Tag.Get("json")),
+ }
+ switch tf.jsonName {
+ case "", "-":
+ tf.jsonName = tf.goName
+ }
+ tfs = append(tfs, tf)
+
+ f := v.Field(i)
+ if f.Kind() == reflect.Ptr {
+ f = f.Elem()
+ }
+ if f.Kind() == reflect.Struct {
+ if f.Type().Implements(umarshalerType) {
+ return nil, fmt.Errorf("struct type %q which has field %q which "+
+ "doesn't implement json.Unmarshaler", v.Type().Name(), v.Type().Field(i).Name)
+ }
+ }
+ }
+ return tfs, nil
+}
+
+// parseTag just returns the first entry in the tag. tag is the string
+// returned by reflect.StructField.Tag().Get().
+func parseTag(tag string) string {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx]
+ }
+ return tag
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go
new file mode 100644
index 00000000000..a1c99621e9f
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go
@@ -0,0 +1,70 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package time provides for custom types to translate time from JSON and other formats
+// into time.Time objects.
+package time
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Unix provides a type that can marshal and unmarshal a string representation
+// of the unix epoch into a time.Time object.
+type Unix struct {
+ T time.Time
+}
+
+// MarshalJSON implements encoding/json.MarshalJSON().
+func (u Unix) MarshalJSON() ([]byte, error) {
+ if u.T.IsZero() {
+ return []byte(""), nil
+ }
+ return []byte(fmt.Sprintf("%q", strconv.FormatInt(u.T.Unix(), 10))), nil
+}
+
+// UnmarshalJSON implements encoding/json.UnmarshalJSON().
+func (u *Unix) UnmarshalJSON(b []byte) error {
+ i, err := strconv.Atoi(strings.Trim(string(b), `"`))
+ if err != nil {
+ return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err)
+ }
+ u.T = time.Unix(int64(i), 0)
+ return nil
+}
+
+// DurationTime provides a type that can marshal and unmarshal a string representation
+// of a duration from now into a time.Time object.
+// Note: I'm not sure this is the best way to do this. What happens is we get a field
+// called "expires_in" that represents the seconds from now that this expires. We
+// turn that into a time we call .ExpiresOn. But maybe we should be recording
+// when the token was received at .TokenRecieved and .ExpiresIn should remain as a duration.
+// Then we could have a method called ExpiresOn(). Honestly, the whole thing is
+// bad because the server doesn't return a concrete time. I think this is
+// cleaner, but its not great either.
+type DurationTime struct {
+ T time.Time
+}
+
+// MarshalJSON implements encoding/json.MarshalJSON().
+func (d DurationTime) MarshalJSON() ([]byte, error) {
+ if d.T.IsZero() {
+ return []byte(""), nil
+ }
+
+ dt := time.Until(d.T)
+ return []byte(fmt.Sprintf("%d", int64(dt*time.Second))), nil
+}
+
+// UnmarshalJSON implements encoding/json.UnmarshalJSON().
+func (d *DurationTime) UnmarshalJSON(b []byte) error {
+ i, err := strconv.Atoi(strings.Trim(string(b), `"`))
+ if err != nil {
+ return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err)
+ }
+ d.T = time.Now().Add(time.Duration(i) * time.Second)
+ return nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
new file mode 100644
index 00000000000..04236ff3127
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
@@ -0,0 +1,177 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package local contains a local HTTP server used with interactive authentication.
+package local
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+)
+
// okPage is the response body served after a successful interactive login.
// NOTE(review): the HTML markup tags appear to have been stripped from this
// vendored copy during extraction — verify the page content against upstream.
var okPage = []byte(`




 Authentication Complete


 Authentication complete. You can return to the application. Feel free to close this browser tab.


`)

// failPage is the response body (a fmt format string: error, then
// error_description) served when the authority reports an error.
// NOTE(review): markup appears stripped here as well — verify against upstream.
const failPage = `




 Authentication Failed


 Authentication failed. You can return to the application. Feel free to close this browser tab.
 Error details: error %s error_description: %s


`
+
// Result is the result from the redirect.
type Result struct {
	// Code is the authorization code sent by the authority server.
	Code string
	// Err is set if there was an error during the redirect handling.
	Err error
}
+
// Server is an HTTP server that listens on localhost for the OAuth redirect.
type Server struct {
	// Addr is the base address ("http://localhost:<port>") the server is listening on.
	Addr string
	// resultCh carries the single redirect outcome (buffered, capacity 1).
	resultCh chan Result
	// s is the underlying HTTP server.
	s *http.Server
	// reqState is the OAuth state value this server expects echoed back.
	reqState string
}
+
+// New creates a local HTTP server and starts it.
+func New(reqState string, port int) (*Server, error) {
+ var l net.Listener
+ var err error
+ var portStr string
+ if port > 0 {
+ // use port provided by caller
+ l, err = net.Listen("tcp", fmt.Sprintf("localhost:%d", port))
+ portStr = strconv.FormatInt(int64(port), 10)
+ } else {
+ // find a free port
+ for i := 0; i < 10; i++ {
+ l, err = net.Listen("tcp", "localhost:0")
+ if err != nil {
+ continue
+ }
+ addr := l.Addr().String()
+ portStr = addr[strings.LastIndex(addr, ":")+1:]
+ break
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ serv := &Server{
+ Addr: fmt.Sprintf("http://localhost:%s", portStr),
+ s: &http.Server{Addr: "localhost:0", ReadHeaderTimeout: time.Second},
+ reqState: reqState,
+ resultCh: make(chan Result, 1),
+ }
+ serv.s.Handler = http.HandlerFunc(serv.handler)
+
+ if err := serv.start(l); err != nil {
+ return nil, err
+ }
+
+ return serv, nil
+}
+
+func (s *Server) start(l net.Listener) error {
+ go func() {
+ err := s.s.Serve(l)
+ if err != nil {
+ select {
+ case s.resultCh <- Result{Err: err}:
+ default:
+ }
+ }
+ }()
+
+ return nil
+}
+
+// Result gets the result of the redirect operation. Once a single result is returned, the server
+// is shutdown. ctx deadline will be honored.
+func (s *Server) Result(ctx context.Context) Result {
+ select {
+ case <-ctx.Done():
+ return Result{Err: ctx.Err()}
+ case r := <-s.resultCh:
+ return r
+ }
+}
+
// Shutdown shuts down the server gracefully; the error is intentionally ignored.
func (s *Server) Shutdown() {
	// Note: You might get clever and think you can do this in handler() as a defer, you can't.
	_ = s.s.Shutdown(context.Background())
}
+
// putResult delivers r without blocking; if a result is already buffered the
// new one is dropped, so only the first outcome is observed.
func (s *Server) putResult(r Result) {
	select {
	case s.resultCh <- r:
	default:
	}
}
+
+func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
+ q := r.URL.Query()
+
+ headerErr := q.Get("error")
+ if headerErr != "" {
+ desc := q.Get("error_description")
+ // Note: It is a little weird we handle some errors by not going to the failPage. If they all should,
+ // change this to s.error() and make s.error() write the failPage instead of an error code.
+ _, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc)))
+ s.putResult(Result{Err: fmt.Errorf(desc)})
+ return
+ }
+
+ respState := q.Get("state")
+ switch respState {
+ case s.reqState:
+ case "":
+ s.error(w, http.StatusInternalServerError, "server didn't send OAuth state")
+ return
+ default:
+ s.error(w, http.StatusInternalServerError, "mismatched OAuth state, req(%s), resp(%s)", s.reqState, respState)
+ return
+ }
+
+ code := q.Get("code")
+ if code == "" {
+ s.error(w, http.StatusInternalServerError, "authorization code missing in query string")
+ return
+ }
+
+ _, _ = w.Write(okPage)
+ s.putResult(Result{Code: code})
+}
+
// error writes an HTTP error response carrying the formatted message and also
// records that message as the server's result.
func (s *Server) error(w http.ResponseWriter, code int, str string, i ...interface{}) {
	err := fmt.Errorf(str, i...)
	http.Error(w, err.Error(), code)
	s.putResult(Result{Err: err})
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
new file mode 100644
index 00000000000..6b4016c1166
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
@@ -0,0 +1,296 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package oauth
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
+ internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs"
+ "github.com/google/uuid"
+)
+
// ResolveEndpointer contains the methods for resolving authority endpoints.
type ResolveEndpointer interface {
	// ResolveEndpoints resolves the endpoints for the given authority and user.
	ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error)
}
+
// AccessTokens contains the methods for fetching tokens from different sources
// (auth code, refresh token, client secret/assertion, device code, SAML, ...).
type AccessTokens interface {
	DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (accesstokens.DeviceCodeResult, error)
	FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (accesstokens.TokenResponse, error)
	FromAuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error)
	FromRefreshToken(ctx context.Context, appType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken string) (accesstokens.TokenResponse, error)
	FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (accesstokens.TokenResponse, error)
	FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (accesstokens.TokenResponse, error)
	FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (accesstokens.TokenResponse, error)
	FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (accesstokens.TokenResponse, error)
	FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult accesstokens.DeviceCodeResult) (accesstokens.TokenResponse, error)
	FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (accesstokens.TokenResponse, error)
}
+
// FetchAuthority will be implemented by authority.Authority.
type FetchAuthority interface {
	// UserRealm looks up the realm (Managed vs Federated) for the user.
	UserRealm(context.Context, authority.AuthParams) (authority.UserRealm, error)
	// AADInstanceDiscovery performs AAD instance discovery for the authority.
	AADInstanceDiscovery(context.Context, authority.Info) (authority.InstanceDiscoveryResponse, error)
}
+
// FetchWSTrust contains the methods for interacting with WSTrust endpoints.
type FetchWSTrust interface {
	// Mex fetches and parses the WS-Trust MEX document at the given URL.
	Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error)
	// SAMLTokenInfo obtains a SAML token from the given WS-Trust endpoint.
	SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (wstrust.SamlTokenInfo, error)
}
+
// Client provides tokens for various types of token requests.
type Client struct {
	// Resolver resolves authority endpoints.
	Resolver ResolveEndpointer
	// AccessTokens fetches tokens from the various grant flows.
	AccessTokens AccessTokens
	// Authority answers realm and instance-discovery queries.
	Authority FetchAuthority
	// WSTrust talks to WS-Trust endpoints for federated username/password flows.
	WSTrust FetchWSTrust
}
+
// New is the constructor for Client, wiring all sub-clients to the given
// HTTP transport. (The original comment said "constructor for Token".)
func New(httpClient ops.HTTPClient) *Client {
	r := ops.New(httpClient)
	return &Client{
		Resolver:     newAuthorityEndpoint(r),
		AccessTokens: r.AccessTokens(),
		Authority:    r.Authority(),
		WSTrust:      r.WSTrust(),
	}
}
+
// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance.
// It delegates to the configured Resolver.
func (t *Client) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) {
	return t.Resolver.ResolveEndpoints(ctx, authorityInfo, userPrincipalName)
}
+
// AADInstanceDiscovery runs instance discovery for the given authority via the
// configured Authority client.
func (t *Client) AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) {
	return t.Authority.AADInstanceDiscovery(ctx, authorityInfo)
}
+
+// AuthCode returns a token based on an authorization code.
+func (t *Client) AuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) {
+ if err := t.resolveEndpoint(ctx, &req.AuthParams, ""); err != nil {
+ return accesstokens.TokenResponse{}, err
+ }
+
+ tResp, err := t.AccessTokens.FromAuthCode(ctx, req)
+ if err != nil {
+ return accesstokens.TokenResponse{}, fmt.Errorf("could not retrieve token from auth code: %w", err)
+ }
+ return tResp, nil
+}
+
// Credential acquires a token from the authority using a client credentials grant.
func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) {
	if cred.TokenProvider != nil {
		// The application supplied its own token-provider callback: delegate
		// to it and synthesize a TokenResponse from what it returns.
		now := time.Now()
		// Defensive copy so the callback cannot mutate our scope slice.
		scopes := make([]string, len(authParams.Scopes))
		copy(scopes, authParams.Scopes)
		params := exported.TokenProviderParameters{
			CorrelationID: uuid.New().String(),
			Scopes:        scopes,
		}
		tr, err := cred.TokenProvider(ctx, params)
		if err != nil {
			return accesstokens.TokenResponse{}, err
		}
		return accesstokens.TokenResponse{
			AccessToken: tr.AccessToken,
			// ExpiresOn is derived from the provider's seconds-from-now value.
			ExpiresOn: internalTime.DurationTime{
				T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second),
			},
			GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes},
		}, nil
	}

	if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
		return accesstokens.TokenResponse{}, err
	}

	// Secret-based auth takes precedence; otherwise sign a JWT assertion
	// using the credential's certificate/key (or assertion callback).
	if cred.Secret != "" {
		return t.AccessTokens.FromClientSecret(ctx, authParams, cred.Secret)
	}
	jwt, err := cred.JWT(ctx, authParams)
	if err != nil {
		return accesstokens.TokenResponse{}, err
	}
	return t.AccessTokens.FromAssertion(ctx, authParams, jwt)
}
+
// OnBehalfOf acquires a token for a downstream call using the on-behalf-of
// flow, exchanging the user assertion plus the client credential.
// (The original comment was copy-pasted from Credential.)
func (t *Client) OnBehalfOf(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) {
	if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
		return accesstokens.TokenResponse{}, err
	}

	// Client secret path; otherwise fall through to certificate assertion.
	if cred.Secret != "" {
		return t.AccessTokens.FromUserAssertionClientSecret(ctx, authParams, authParams.UserAssertion, cred.Secret)

	}
	jwt, err := cred.JWT(ctx, authParams)
	if err != nil {
		return accesstokens.TokenResponse{}, err
	}
	return t.AccessTokens.FromUserAssertionClientCertificate(ctx, authParams, authParams.UserAssertion, jwt)
}
+
// Refresh redeems a refresh token for a new access token. cc is forwarded
// unconditionally; the accesstokens client only uses it when reqType is
// ATConfidential.
func (t *Client) Refresh(ctx context.Context, reqType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken accesstokens.RefreshToken) (accesstokens.TokenResponse, error) {
	if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
		return accesstokens.TokenResponse{}, err
	}

	return t.AccessTokens.FromRefreshToken(ctx, reqType, authParams, cc, refreshToken.Secret)
}
+
// UsernamePassword retrieves a token where a username and password is used. However, if this is
// a user realm of "Federated", this uses SAML tokens. If "Managed", uses normal username/password.
func (t *Client) UsernamePassword(ctx context.Context, authParams authority.AuthParams) (accesstokens.TokenResponse, error) {
	// ADFS authorities resolve endpoints using the UPN and go straight to
	// the username/password grant.
	if authParams.AuthorityInfo.AuthorityType == authority.ADFS {
		if err := t.resolveEndpoint(ctx, &authParams, authParams.Username); err != nil {
			return accesstokens.TokenResponse{}, err
		}
		return t.AccessTokens.FromUsernamePassword(ctx, authParams)
	}
	if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
		return accesstokens.TokenResponse{}, err
	}

	// Determine whether the account is cloud-managed or federated.
	userRealm, err := t.Authority.UserRealm(ctx, authParams)
	if err != nil {
		return accesstokens.TokenResponse{}, fmt.Errorf("problem getting user realm(user: %s) from authority: %w", authParams.Username, err)
	}

	switch userRealm.AccountType {
	case authority.Federated:
		// Federated: fetch the WS-Trust MEX document, obtain a SAML token
		// from the federation endpoint, then exchange it for an OAuth token.
		mexDoc, err := t.WSTrust.Mex(ctx, userRealm.FederationMetadataURL)
		if err != nil {
			return accesstokens.TokenResponse{}, fmt.Errorf("problem getting mex doc from federated url(%s): %w", userRealm.FederationMetadataURL, err)
		}

		saml, err := t.WSTrust.SAMLTokenInfo(ctx, authParams, userRealm.CloudAudienceURN, mexDoc.UsernamePasswordEndpoint)
		if err != nil {
			return accesstokens.TokenResponse{}, fmt.Errorf("problem getting SAML token info: %w", err)
		}
		return t.AccessTokens.FromSamlGrant(ctx, authParams, saml)
	case authority.Managed:
		return t.AccessTokens.FromUsernamePassword(ctx, authParams)
	}
	return accesstokens.TokenResponse{}, errors.New("unknown account type")
}
+
// DeviceCode is the result of a call to Token.DeviceCode().
type DeviceCode struct {
	// Result is the device code result from the first call in the device code flow. This allows
	// the caller to retrieve the displayed code that is used to authorize on the second device.
	Result accesstokens.DeviceCodeResult
	// authParams are the parameters used for the initial request, replayed on each poll.
	authParams authority.AuthParams

	// accessTokens polls the token endpoint; nil marks a zero/invalid DeviceCode.
	accessTokens AccessTokens
}
+
+// Token returns a token AFTER the user uses the user code on the second device. This will block
+// until either: (1) the code is input by the user and the service releases a token, (2) the token
+// expires, (3) the Context passed to .DeviceCode() is cancelled or expires, (4) some other service
+// error occurs.
+func (d DeviceCode) Token(ctx context.Context) (accesstokens.TokenResponse, error) {
+ if d.accessTokens == nil {
+ return accesstokens.TokenResponse{}, fmt.Errorf("DeviceCode was either created outside its package or the creating method had an error. DeviceCode is not valid")
+ }
+
+ var cancel context.CancelFunc
+ d.Result.ExpiresOn.Sub(time.Now().UTC())
+ if deadline, ok := ctx.Deadline(); !ok || d.Result.ExpiresOn.Before(deadline) {
+ ctx, cancel = context.WithDeadline(ctx, d.Result.ExpiresOn)
+ } else {
+ ctx, cancel = context.WithCancel(ctx)
+ }
+ defer cancel()
+
+ var interval = 50 * time.Millisecond
+ timer := time.NewTimer(interval)
+ defer timer.Stop()
+
+ for {
+ timer.Reset(interval)
+ select {
+ case <-ctx.Done():
+ return accesstokens.TokenResponse{}, ctx.Err()
+ case <-timer.C:
+ interval += interval * 2
+ if interval > 5*time.Second {
+ interval = 5 * time.Second
+ }
+ }
+
+ token, err := d.accessTokens.FromDeviceCodeResult(ctx, d.authParams, d.Result)
+ if err != nil && isWaitDeviceCodeErr(err) {
+ continue
+ }
+ return token, err // This handles if it was a non-wait error or success
+ }
+}
+
+type deviceCodeError struct {
+ Error string `json:"error"`
+}
+
+func isWaitDeviceCodeErr(err error) bool {
+ var c errors.CallErr
+ if !errors.As(err, &c) {
+ return false
+ }
+ if c.Resp.StatusCode != 400 {
+ return false
+ }
+ var dCErr deviceCodeError
+ defer c.Resp.Body.Close()
+ body, err := io.ReadAll(c.Resp.Body)
+ if err != nil {
+ return false
+ }
+ err = json.Unmarshal(body, &dCErr)
+ if err != nil {
+ return false
+ }
+ if dCErr.Error == "authorization_pending" || dCErr.Error == "slow_down" {
+ return true
+ }
+ return false
+}
+
+// DeviceCode returns a DeviceCode object that can be used to get the code that must be entered on the second
+// device and optionally the token once the code has been entered on the second device.
+func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams) (DeviceCode, error) {
+ if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
+ return DeviceCode{}, err
+ }
+
+ dcr, err := t.AccessTokens.DeviceCodeResult(ctx, authParams)
+ if err != nil {
+ return DeviceCode{}, err
+ }
+
+ return DeviceCode{Result: dcr, authParams: authParams, accessTokens: t.AccessTokens}, nil
+}
+
+func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error {
+ endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName)
+ if err != nil {
+ return fmt.Errorf("unable to resolve an endpoint: %s", err)
+ }
+ authParams.Endpoints = endpoints
+ return nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
new file mode 100644
index 00000000000..eaeb2ef5f08
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
@@ -0,0 +1,412 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package accesstokens exposes a REST client for querying backend systems to get various types of
+access tokens (oauth) for use in authentication.
+
+These calls are of type "application/x-www-form-urlencoded". This means we use url.Values to
+represent arguments and then encode them into the POST body message. We receive JSON in
+return for the requests. The request definition is defined in https://tools.ietf.org/html/rfc7521#section-4.2 .
+*/
+package accesstokens
+
+import (
+ "context"
+ "crypto"
+
+ /* #nosec */
+ "crypto/sha1"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust"
+ "github.com/golang-jwt/jwt/v4"
+ "github.com/google/uuid"
+)
+
// Form field names used when building the x-www-form-urlencoded token
// request bodies below.
const (
	grantType     = "grant_type"
	deviceCode    = "device_code"
	clientID      = "client_id"
	clientInfo    = "client_info"
	clientInfoVal = "1"
	username      = "username"
	password      = "password"
)
+
//go:generate stringer -type=AppType

// AppType is whether the authorization code flow is for a public or confidential client.
type AppType int8

const (
	// ATUnknown is the zero value when the type hasn't been set.
	ATUnknown AppType = iota
	// ATPublic indicates this is for the Public.Client.
	ATPublic
	// ATConfidential indicates this is for the Confidential.Client.
	ATConfidential
)
+
// urlFormCaller abstracts the transport that POSTs url-encoded form values to
// an endpoint and decodes the JSON response into resp.
type urlFormCaller interface {
	URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error
}
+
// DeviceCodeResponse represents the HTTP response received from the device code endpoint
type DeviceCodeResponse struct {
	authority.OAuthResponseBase

	UserCode        string `json:"user_code"`
	DeviceCode      string `json:"device_code"`
	VerificationURL string `json:"verification_url"`
	ExpiresIn       int    `json:"expires_in"`
	Interval        int    `json:"interval"`
	Message         string `json:"message"`

	// AdditionalFields presumably collects JSON properties not bound to the
	// fields above — NOTE(review): confirm against this package's JSON codec.
	AdditionalFields map[string]interface{}
}
+
+// Convert converts the DeviceCodeResponse to a DeviceCodeResult
+func (dcr DeviceCodeResponse) Convert(clientID string, scopes []string) DeviceCodeResult {
+ expiresOn := time.Now().UTC().Add(time.Duration(dcr.ExpiresIn) * time.Second)
+ return NewDeviceCodeResult(dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, expiresOn, dcr.Interval, dcr.Message, clientID, scopes)
+}
+
// Credential represents the credential used in confidential client flows. This can be either
// a Secret or Cert/Key.
// NOTE(review): the consumers in this package check Secret, then
// AssertionCallback/cert, then TokenProvider — presumably only one of these
// should be populated at a time; confirm against the public API docs.
type Credential struct {
	// Secret contains the credential secret if we are doing auth by secret.
	Secret string

	// Cert is the public certificate, if we're authenticating by certificate.
	Cert *x509.Certificate
	// Key is the private key for signing, if we're authenticating by certificate.
	Key crypto.PrivateKey
	// X5c is the JWT assertion's x5c header value, required for SN/I authentication.
	X5c []string

	// AssertionCallback is a function provided by the application, if we're authenticating by assertion.
	AssertionCallback func(context.Context, exported.AssertionRequestOptions) (string, error)

	// TokenProvider is a function provided by the application that implements custom authentication
	// logic for a confidential client
	TokenProvider func(context.Context, exported.TokenProviderParameters) (exported.TokenProviderResult, error)
}
+
// JWT gets the jwt assertion when the credential is not using a secret.
// If AssertionCallback is set the application produces the assertion;
// otherwise c.Cert and c.Key must be set: the x5t header dereferences Cert
// and SignedString signs with Key.
func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (string, error) {
	if c.AssertionCallback != nil {
		options := exported.AssertionRequestOptions{
			ClientID:      authParams.ClientID,
			TokenEndpoint: authParams.Endpoints.TokenEndpoint,
		}
		return c.AssertionCallback(ctx, options)
	}

	// Self-signed client assertion: audience is the token endpoint, valid
	// from now for 10 minutes.
	token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
		"aud": authParams.Endpoints.TokenEndpoint,
		"exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)),
		"iss": authParams.ClientID,
		"jti": uuid.New().String(),
		"nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)),
		"sub": authParams.ClientID,
	})
	token.Header = map[string]interface{}{
		"alg": "RS256",
		"typ": "JWT",
		// x5t is the SHA-1 thumbprint of the certificate's DER bytes.
		"x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)),
	}

	// SN/I authentication additionally needs the full cert chain in x5c.
	if authParams.SendX5C {
		token.Header["x5c"] = c.X5c
	}

	assertion, err := token.SignedString(c.Key)
	if err != nil {
		return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err)
	}
	return assertion, nil
}
+
+// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT.
+// https://tools.ietf.org/html/rfc7517#section-4.8
+func thumbprint(cert *x509.Certificate) []byte {
+ /* #nosec */
+ a := sha1.Sum(cert.Raw)
+ return a[:]
+}
+
// Client represents the REST calls to get tokens from token generator backends.
type Client struct {
	// Comm provides the HTTP transport client.
	Comm urlFormCaller

	// testing, when true, makes doTokenResp skip TokenResponse.Validate().
	testing bool
}
+
+// FromUsernamePassword uses a username and password to get an access token.
+func (c Client) FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) {
+ qv := url.Values{}
+ qv.Set(grantType, grant.Password)
+ qv.Set(username, authParameters.Username)
+ qv.Set(password, authParameters.Password)
+ qv.Set(clientID, authParameters.ClientID)
+ qv.Set(clientInfo, clientInfoVal)
+ addScopeQueryParam(qv, authParameters)
+
+ return c.doTokenResp(ctx, authParameters, qv)
+}
+
// AuthCodeRequest stores the values required to request a token from the authority using an authorization code
type AuthCodeRequest struct {
	// AuthParams are the general authentication parameters for the request.
	AuthParams authority.AuthParams
	// Code is the authorization code being redeemed.
	Code string
	// CodeChallenge is the PKCE verifier sent as code_verifier.
	CodeChallenge string
	// Credential is required when AppType is ATConfidential.
	Credential *Credential
	// AppType distinguishes public from confidential client requests.
	AppType AppType
}
+
+// NewCodeChallengeRequest returns an AuthCodeRequest that uses a code challenge..
+func NewCodeChallengeRequest(params authority.AuthParams, appType AppType, cc *Credential, code, challenge string) (AuthCodeRequest, error) {
+ if appType == ATUnknown {
+ return AuthCodeRequest{}, fmt.Errorf("bug: NewCodeChallengeRequest() called with AppType == ATUnknown")
+ }
+ return AuthCodeRequest{
+ AuthParams: params,
+ AppType: appType,
+ Code: code,
+ CodeChallenge: challenge,
+ Credential: cc,
+ }, nil
+}
+
+// FromAuthCode uses an authorization code to retrieve an access token.
+func (c Client) FromAuthCode(ctx context.Context, req AuthCodeRequest) (TokenResponse, error) {
+ var qv url.Values
+
+ switch req.AppType {
+ case ATUnknown:
+ return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == ATUnknown")
+ case ATConfidential:
+ var err error
+ if req.Credential == nil {
+ return TokenResponse{}, fmt.Errorf("AuthCodeRequest had nil Credential for Confidential app")
+ }
+ qv, err = prepURLVals(ctx, req.Credential, req.AuthParams)
+ if err != nil {
+ return TokenResponse{}, err
+ }
+ case ATPublic:
+ qv = url.Values{}
+ default:
+ return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == %v, which we do not recongnize", req.AppType)
+ }
+
+ qv.Set(grantType, grant.AuthCode)
+ qv.Set("code", req.Code)
+ qv.Set("code_verifier", req.CodeChallenge)
+ qv.Set("redirect_uri", req.AuthParams.Redirecturi)
+ qv.Set(clientID, req.AuthParams.ClientID)
+ qv.Set(clientInfo, clientInfoVal)
+ addScopeQueryParam(qv, req.AuthParams)
+
+ return c.doTokenResp(ctx, req.AuthParams, qv)
+}
+
+// FromRefreshToken uses a refresh token (for refreshing credentials) to get a new access token.
+func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParams authority.AuthParams, cc *Credential, refreshToken string) (TokenResponse, error) {
+ qv := url.Values{}
+ if appType == ATConfidential {
+ var err error
+ qv, err = prepURLVals(ctx, cc, authParams)
+ if err != nil {
+ return TokenResponse{}, err
+ }
+ }
+ qv.Set(grantType, grant.RefreshToken)
+ qv.Set(clientID, authParams.ClientID)
+ qv.Set(clientInfo, clientInfoVal)
+ qv.Set("refresh_token", refreshToken)
+ addScopeQueryParam(qv, authParams)
+
+ return c.doTokenResp(ctx, authParams, qv)
+}
+
+// FromClientSecret uses a client's secret (aka password) to get a new token.
+func (c Client) FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (TokenResponse, error) {
+ qv := url.Values{}
+ qv.Set(grantType, grant.ClientCredential)
+ qv.Set("client_secret", clientSecret)
+ qv.Set(clientID, authParameters.ClientID)
+ addScopeQueryParam(qv, authParameters)
+
+ token, err := c.doTokenResp(ctx, authParameters, qv)
+ if err != nil {
+ return token, fmt.Errorf("FromClientSecret(): %w", err)
+ }
+ return token, nil
+}
+
// FromAssertion uses a signed client JWT assertion with the client
// credentials grant to get a new token.
func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) {
	qv := url.Values{}
	qv.Set(grantType, grant.ClientCredential)
	qv.Set("client_assertion_type", grant.ClientAssertion)
	qv.Set("client_assertion", assertion)
	qv.Set(clientID, authParameters.ClientID)
	qv.Set(clientInfo, clientInfoVal)
	addScopeQueryParam(qv, authParameters)

	token, err := c.doTokenResp(ctx, authParameters, qv)
	if err != nil {
		return token, fmt.Errorf("FromAssertion(): %w", err)
	}
	return token, nil
}
+
// FromUserAssertionClientSecret exchanges a user assertion plus the client
// secret for a token via the on-behalf-of flow (requested_token_use=on_behalf_of).
func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) {
	qv := url.Values{}
	qv.Set(grantType, grant.JWT)
	qv.Set(clientID, authParameters.ClientID)
	qv.Set("client_secret", clientSecret)
	qv.Set("assertion", userAssertion)
	qv.Set(clientInfo, clientInfoVal)
	qv.Set("requested_token_use", "on_behalf_of")
	addScopeQueryParam(qv, authParameters)

	return c.doTokenResp(ctx, authParameters, qv)
}
+
// FromUserAssertionClientCertificate exchanges a user assertion plus a signed
// client assertion for a token via the on-behalf-of flow.
func (c Client) FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (TokenResponse, error) {
	qv := url.Values{}
	qv.Set(grantType, grant.JWT)
	qv.Set("client_assertion_type", grant.ClientAssertion)
	qv.Set("client_assertion", assertion)
	qv.Set(clientID, authParameters.ClientID)
	qv.Set("assertion", userAssertion)
	qv.Set(clientInfo, clientInfoVal)
	qv.Set("requested_token_use", "on_behalf_of")
	addScopeQueryParam(qv, authParameters)

	return c.doTokenResp(ctx, authParameters, qv)
}
+
+func (c Client) DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (DeviceCodeResult, error) {
+ qv := url.Values{}
+ qv.Set(clientID, authParameters.ClientID)
+ addScopeQueryParam(qv, authParameters)
+
+ endpoint := strings.Replace(authParameters.Endpoints.TokenEndpoint, "token", "devicecode", -1)
+
+ resp := DeviceCodeResponse{}
+ err := c.Comm.URLFormCall(ctx, endpoint, qv, &resp)
+ if err != nil {
+ return DeviceCodeResult{}, err
+ }
+
+ return resp.Convert(authParameters.ClientID, authParameters.Scopes), nil
+}
+
// FromDeviceCodeResult polls the token endpoint with a previously issued
// device code, returning a token once the user has authorized on the second
// device.
func (c Client) FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult DeviceCodeResult) (TokenResponse, error) {
	qv := url.Values{}
	qv.Set(grantType, grant.DeviceCode)
	qv.Set(deviceCode, deviceCodeResult.DeviceCode)
	qv.Set(clientID, authParameters.ClientID)
	qv.Set(clientInfo, clientInfoVal)
	addScopeQueryParam(qv, authParameters)

	return c.doTokenResp(ctx, authParameters, qv)
}
+
+func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (TokenResponse, error) {
+ qv := url.Values{}
+ qv.Set(username, authParameters.Username)
+ qv.Set(password, authParameters.Password)
+ qv.Set(clientID, authParameters.ClientID)
+ qv.Set(clientInfo, clientInfoVal)
+ qv.Set("assertion", base64.StdEncoding.WithPadding(base64.StdPadding).EncodeToString([]byte(samlGrant.Assertion)))
+ addScopeQueryParam(qv, authParameters)
+
+ switch samlGrant.AssertionType {
+ case grant.SAMLV1:
+ qv.Set(grantType, grant.SAMLV1)
+ case grant.SAMLV2:
+ qv.Set(grantType, grant.SAMLV2)
+ default:
+ return TokenResponse{}, fmt.Errorf("GetAccessTokenFromSamlGrant returned unknown SAML assertion type: %q", samlGrant.AssertionType)
+ }
+
+ return c.doTokenResp(ctx, authParameters, qv)
+}
+
+func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams, qv url.Values) (TokenResponse, error) {
+ resp := TokenResponse{}
+ err := c.Comm.URLFormCall(ctx, authParams.Endpoints.TokenEndpoint, qv, &resp)
+ if err != nil {
+ return resp, err
+ }
+ resp.ComputeScope(authParams)
+ if c.testing {
+ return resp, nil
+ }
+ return resp, resp.Validate()
+}
+
+// prepURLVals returns an url.Values that sets various key/values if we are doing secrets
+// or JWT assertions.
+func prepURLVals(ctx context.Context, cc *Credential, authParams authority.AuthParams) (url.Values, error) {
+ params := url.Values{}
+ if cc.Secret != "" {
+ params.Set("client_secret", cc.Secret)
+ return params, nil
+ }
+
+ jwt, err := cc.JWT(ctx, authParams)
+ if err != nil {
+ return nil, err
+ }
+ params.Set("client_assertion", jwt)
+ params.Set("client_assertion_type", grant.ClientAssertion)
+ return params, nil
+}
+
// Reserved scopes that AAD always grants; AppendDefaultScopes filters them
// out of caller input and appends them exactly once.
// openid required to get an id token
// offline_access required to get a refresh token
// profile required to get the client_info field back
var detectDefaultScopes = map[string]bool{
	"openid":         true,
	"offline_access": true,
	"profile":        true,
}

// defaultScopes lists the same reserved scopes in append order.
var defaultScopes = []string{"openid", "offline_access", "profile"}
+
+func AppendDefaultScopes(authParameters authority.AuthParams) []string {
+ scopes := make([]string, 0, len(authParameters.Scopes)+len(defaultScopes))
+ for _, scope := range authParameters.Scopes {
+ s := strings.TrimSpace(scope)
+ if s == "" {
+ continue
+ }
+ if detectDefaultScopes[scope] {
+ continue
+ }
+ scopes = append(scopes, scope)
+ }
+ scopes = append(scopes, defaultScopes...)
+ return scopes
+}
+
// addScopeQueryParam sets the space-delimited "scope" form field, with the
// default scopes appended.
func addScopeQueryParam(queryParams url.Values, authParameters authority.AuthParams) {
	scopes := AppendDefaultScopes(authParameters)
	queryParams.Set("scope", strings.Join(scopes, " "))
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go
new file mode 100644
index 00000000000..3bec4a67cf1
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go
@@ -0,0 +1,25 @@
// Code generated by "stringer -type=AppType"; DO NOT EDIT.

package accesstokens

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ATUnknown-0]
	_ = x[ATPublic-1]
	_ = x[ATConfidential-2]
}

const _AppType_name = "ATUnknownATPublicATConfidential"

var _AppType_index = [...]uint8{0, 9, 17, 31}

// String returns the constant's name, or "AppType(n)" for out-of-range values.
func (i AppType) String() string {
	if i < 0 || i >= AppType(len(_AppType_index)-1) {
		return "AppType(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _AppType_name[_AppType_index[i]:_AppType_index[i+1]]
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
new file mode 100644
index 00000000000..cc847001979
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
@@ -0,0 +1,332 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package accesstokens
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
// IDToken consists of all the information used to validate a user.
// https://docs.microsoft.com/azure/active-directory/develop/id-tokens .
type IDToken struct {
	PreferredUsername string `json:"preferred_username,omitempty"`
	GivenName         string `json:"given_name,omitempty"`
	FamilyName        string `json:"family_name,omitempty"`
	MiddleName        string `json:"middle_name,omitempty"`
	Name              string `json:"name,omitempty"`
	Oid               string `json:"oid,omitempty"`
	TenantID          string `json:"tid,omitempty"`
	Subject           string `json:"sub,omitempty"`
	UPN               string `json:"upn,omitempty"`
	Email             string `json:"email,omitempty"`
	AlternativeID     string `json:"alternative_id,omitempty"`
	Issuer            string `json:"iss,omitempty"`
	Audience          string `json:"aud,omitempty"`
	ExpirationTime    int64  `json:"exp,omitempty"`
	IssuedAt          int64  `json:"iat,omitempty"`
	NotBefore         int64  `json:"nbf,omitempty"`
	// RawToken is the original (still-encoded) JWT string. It has no JSON tag
	// and is populated by UnmarshalJSON rather than decoded from a claim.
	RawToken string

	// AdditionalFields receives any claims that have no dedicated field above
	// (handled by the library's custom JSON marshal/unmarshal engine).
	AdditionalFields map[string]interface{}
}
+
+var null = []byte("null")
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (i *IDToken) UnmarshalJSON(b []byte) error {
+ if bytes.Equal(null, b) {
+ return nil
+ }
+
+ // Because we have a custom unmarshaler, you
+ // cannot directly call json.Unmarshal here. If you do, it will call this function
+ // recursively until reach our recursion limit. We have to create a new type
+ // that doesn't have this method in order to use json.Unmarshal.
+ type idToken2 IDToken
+
+ jwt := strings.Trim(string(b), `"`)
+ jwtArr := strings.Split(jwt, ".")
+ if len(jwtArr) < 2 {
+ return errors.New("IDToken returned from server is invalid")
+ }
+
+ jwtPart := jwtArr[1]
+ jwtDecoded, err := decodeJWT(jwtPart)
+ if err != nil {
+ return fmt.Errorf("unable to unmarshal IDToken, problem decoding JWT: %w", err)
+ }
+
+ token := idToken2{}
+ err = json.Unmarshal(jwtDecoded, &token)
+ if err != nil {
+ return fmt.Errorf("unable to unmarshal IDToken: %w", err)
+ }
+ token.RawToken = jwt
+
+ *i = IDToken(token)
+ return nil
+}
+
+// IsZero indicates if the IDToken is the zero value.
+func (i IDToken) IsZero() bool {
+ v := reflect.ValueOf(i)
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ if !field.IsZero() {
+ switch field.Kind() {
+ case reflect.Map, reflect.Slice:
+ if field.Len() == 0 {
+ continue
+ }
+ }
+ return false
+ }
+ }
+ return true
+}
+
+// LocalAccountID extracts an account's local account ID from an ID token.
+func (i IDToken) LocalAccountID() string {
+ if i.Oid != "" {
+ return i.Oid
+ }
+ return i.Subject
+}
+
// jwtDecoder is provided to allow tests to provide their own.
var jwtDecoder = decodeJWT

// ClientInfo is used to create a Home Account ID for an account.
type ClientInfo struct {
	// UID and UTID are joined by HomeAccountID as "<uid>.<utid>".
	UID  string `json:"uid"`
	UTID string `json:"utid"`

	// AdditionalFields receives any JSON keys without a dedicated field.
	AdditionalFields map[string]interface{}
}
+
// UnmarshalJSON implements json.Unmarshaler.
func (c *ClientInfo) UnmarshalJSON(b []byte) error {
	s := strings.Trim(string(b), `"`)
	// Client info may be empty in some flows, e.g. certificate exchange.
	if len(s) == 0 {
		return nil
	}

	// Because we have a custom unmarshaler, you
	// cannot directly call json.Unmarshal here. If you do, it will call this function
	// recursively until reach our recursion limit. We have to create a new type
	// that doesn't have this method in order to use json.Unmarshal.
	type clientInfo2 ClientInfo

	// jwtDecoder is a package variable so tests can stub the decode step.
	raw, err := jwtDecoder(s)
	if err != nil {
		return fmt.Errorf("TokenResponse client_info field had JWT decode error: %w", err)
	}

	var c2 clientInfo2

	// NOTE(review): "TokenRespone" below is a typo in the error text; it is left
	// untouched here because error strings are runtime behavior.
	err = json.Unmarshal(raw, &c2)
	if err != nil {
		return fmt.Errorf("was unable to unmarshal decoded JWT in TokenRespone to ClientInfo: %w", err)
	}

	*c = ClientInfo(c2)
	return nil
}
+
+// HomeAccountID creates the home account ID.
+func (c ClientInfo) HomeAccountID() string {
+ if c.UID == "" || c.UTID == "" {
+ return ""
+ }
+ return fmt.Sprintf("%s.%s", c.UID, c.UTID)
+}
+
// Scopes represents scopes in a TokenResponse.
type Scopes struct {
	Slice []string
}

// UnmarshalJSON implements json.Unmarshal.
// Scopes arrive as one space-delimited JSON string; an empty string leaves
// Slice nil.
func (s *Scopes) UnmarshalJSON(b []byte) error {
	raw := strings.Trim(string(b), `"`)
	if raw == "" {
		return nil
	}
	s.Slice = strings.Split(raw, " ")
	return nil
}
+
// TokenResponse is the information that is returned from a token endpoint during a token acquisition flow.
type TokenResponse struct {
	authority.OAuthResponseBase

	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`

	// FamilyID is the "foci" (family of client IDs) value.
	FamilyID   string     `json:"foci"`
	IDToken    IDToken    `json:"id_token"`
	ClientInfo ClientInfo `json:"client_info"`
	// ExpiresOn/ExtExpiresOn are decoded from the relative "expires_in"
	// second counts by internalTime.DurationTime.
	ExpiresOn     internalTime.DurationTime `json:"expires_in"`
	ExtExpiresOn  internalTime.DurationTime `json:"ext_expires_in"`
	GrantedScopes Scopes                    `json:"scope"`
	// DeclinedScopes is derived by ComputeScope; the server never sends it.
	DeclinedScopes []string // This is derived

	AdditionalFields map[string]interface{}

	// scopesComputed records that ComputeScope ran; Validate requires it.
	scopesComputed bool
}
+
+// ComputeScope computes the final scopes based on what was granted by the server and
+// what our AuthParams were from the authority server. Per OAuth spec, if no scopes are returned, the response should be treated as if all scopes were granted
+// This behavior can be observed in client assertion flows, but can happen at any time, this check ensures we treat
+// those special responses properly Link to spec: https://tools.ietf.org/html/rfc6749#section-3.3
+func (tr *TokenResponse) ComputeScope(authParams authority.AuthParams) {
+ if len(tr.GrantedScopes.Slice) == 0 {
+ tr.GrantedScopes = Scopes{Slice: authParams.Scopes}
+ } else {
+ tr.DeclinedScopes = findDeclinedScopes(authParams.Scopes, tr.GrantedScopes.Slice)
+ }
+ tr.scopesComputed = true
+}
+
+// Validate validates the TokenResponse has basic valid values. It must be called
+// after ComputeScopes() is called.
+func (tr *TokenResponse) Validate() error {
+ if tr.Error != "" {
+ return fmt.Errorf("%s: %s", tr.Error, tr.ErrorDescription)
+ }
+
+ if tr.AccessToken == "" {
+ return errors.New("response is missing access_token")
+ }
+
+ if !tr.scopesComputed {
+ return fmt.Errorf("TokenResponse hasn't had ScopesComputed() called")
+ }
+ return nil
+}
+
+func (tr *TokenResponse) CacheKey(authParams authority.AuthParams) string {
+ if authParams.AuthorizationType == authority.ATOnBehalfOf {
+ return authParams.AssertionHash()
+ }
+ if authParams.AuthorizationType == authority.ATClientCredentials {
+ return authParams.AppKey()
+ }
+ if authParams.IsConfidentialClient || authParams.AuthorizationType == authority.ATRefreshToken {
+ return tr.ClientInfo.HomeAccountID()
+ }
+ return ""
+}
+
// findDeclinedScopes returns the requested scopes that are absent from the
// granted set. Comparison is case-insensitive; returned entries keep the
// caller's original casing.
func findDeclinedScopes(requestedScopes []string, grantedScopes []string) []string {
	granted := make(map[string]bool, len(grantedScopes))
	for _, g := range grantedScopes {
		granted[strings.ToLower(g)] = true
	}

	declined := []string{}
	for _, want := range requestedScopes {
		if granted[strings.ToLower(want)] {
			continue
		}
		declined = append(declined, want)
	}
	return declined
}
+
// decodeJWT decodes a JWT and converts it to a byte array representing a JSON object
// JWT has headers and payload base64url encoded without padding
// https://tools.ietf.org/html/rfc7519#section-3 and
// https://tools.ietf.org/html/rfc7515#section-2
//
// Callers pass a single dot-separated segment (header or payload), not the
// whole token.
func decodeJWT(data string) ([]byte, error) {
	// https://tools.ietf.org/html/rfc7515#appendix-C
	return base64.RawURLEncoding.DecodeString(data)
}
+
// RefreshToken is the JSON representation of a MSAL refresh token for encoding to storage.
type RefreshToken struct {
	HomeAccountID     string `json:"home_account_id,omitempty"`
	Environment       string `json:"environment,omitempty"`
	CredentialType    string `json:"credential_type,omitempty"`
	ClientID          string `json:"client_id,omitempty"`
	FamilyID          string `json:"family_id,omitempty"`
	Secret            string `json:"secret,omitempty"`
	Realm             string `json:"realm,omitempty"`
	Target            string `json:"target,omitempty"`
	UserAssertionHash string `json:"user_assertion_hash,omitempty"`

	AdditionalFields map[string]interface{}
}

// NewRefreshToken is the constructor for RefreshToken. CredentialType is
// always "RefreshToken"; Realm, Target and UserAssertionHash stay unset.
func NewRefreshToken(homeID, env, clientID, refreshToken, familyID string) RefreshToken {
	rt := RefreshToken{CredentialType: "RefreshToken"}
	rt.HomeAccountID = homeID
	rt.Environment = env
	rt.ClientID = clientID
	rt.FamilyID = familyID
	rt.Secret = refreshToken
	return rt
}
+
+// Key outputs the key that can be used to uniquely look up this entry in a map.
+func (rt RefreshToken) Key() string {
+ var fourth = rt.FamilyID
+ if fourth == "" {
+ fourth = rt.ClientID
+ }
+
+ return strings.Join(
+ []string{rt.HomeAccountID, rt.Environment, rt.CredentialType, fourth},
+ shared.CacheKeySeparator,
+ )
+}
+
// GetSecret returns the stored refresh token string (the Secret field).
func (rt RefreshToken) GetSecret() string {
	return rt.Secret
}
+
+// DeviceCodeResult stores the response from the STS device code endpoint.
+type DeviceCodeResult struct {
+ // UserCode is the code the user needs to provide when authentication at the verification URI.
+ UserCode string
+ // DeviceCode is the code used in the access token request.
+ DeviceCode string
+ // VerificationURL is the the URL where user can authenticate.
+ VerificationURL string
+ // ExpiresOn is the expiration time of device code in seconds.
+ ExpiresOn time.Time
+ // Interval is the interval at which the STS should be polled at.
+ Interval int
+ // Message is the message which should be displayed to the user.
+ Message string
+ // ClientID is the UUID issued by the authorization server for your application.
+ ClientID string
+ // Scopes is the OpenID scopes used to request access a protected API.
+ Scopes []string
+}
+
+// NewDeviceCodeResult creates a DeviceCodeResult instance.
+func NewDeviceCodeResult(userCode, deviceCode, verificationURL string, expiresOn time.Time, interval int, message, clientID string, scopes []string) DeviceCodeResult {
+ return DeviceCodeResult{userCode, deviceCode, verificationURL, expiresOn, interval, message, clientID, scopes}
+}
+
+func (dcr DeviceCodeResult) String() string {
+ return fmt.Sprintf("UserCode: (%v)\nDeviceCode: (%v)\nURL: (%v)\nMessage: (%v)\n", dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, dcr.Message)
+
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
new file mode 100644
index 00000000000..4724d944ff8
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
@@ -0,0 +1,421 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package authority
+
+import (
+ "context"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/google/uuid"
+)
+
const (
	// authorizationEndpoint is the AAD authorize URL template: host, tenant.
	authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize"
	// instanceDiscoveryEndpoint is the instance discovery URL template: host.
	instanceDiscoveryEndpoint = "https://%v/common/discovery/instance"
	// tenantDiscoveryEndpointWithRegion is the regional OpenID configuration
	// URL template: region, host, tenant.
	tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration"
	// regionName is the environment variable consulted before querying IMDS.
	regionName        = "REGION_NAME"
	defaultAPIVersion = "2021-10-01"
	// imdsEndpoint returns the VM's compute location as plain text.
	imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion
	defaultHost  = "login.microsoftonline.com"
	// autoDetectRegion is the sentinel Region value requesting IMDS auto-detection.
	autoDetectRegion = "TryAutoDetect"
)
+
// jsonCaller makes a JSON REST call and unmarshals the reply into resp.
// It is the subset of the internal comm.Client used by this package.
type jsonCaller interface {
	JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error
}
+
// aadTrustedHostList is the set of well-known AAD authority hosts.
var aadTrustedHostList = map[string]bool{
	"login.windows.net":            true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list
	"login.chinacloudapi.cn":       true, // Microsoft Azure China
	"login.microsoftonline.de":     true, // Microsoft Azure Blackforest
	"login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy
	"login.microsoftonline.us":     true, // Microsoft Azure US Government
	"login.microsoftonline.com":    true, // Microsoft Azure Worldwide
	"login.cloudgovapi.us":         true, // Microsoft Azure US Government
}

// TrustedHost checks if an AAD host is trusted/valid.
// A missing key yields the map's zero value (false), so the lookup alone suffices.
func TrustedHost(host string) bool {
	return aadTrustedHostList[host]
}
+
// OAuthResponseBase carries the STS error fields; it is embedded in response
// types (e.g. TenantDiscoveryResponse, TokenResponse) so callers can detect
// an error payload.
type OAuthResponseBase struct {
	Error            string `json:"error"`
	SubError         string `json:"suberror"`
	ErrorDescription string `json:"error_description"`
	ErrorCodes       []int  `json:"error_codes"`
	CorrelationID    string `json:"correlation_id"`
	Claims           string `json:"claims"`
}
+
// TenantDiscoveryResponse is the tenant endpoints from the OpenID configuration endpoint.
// Use Validate to confirm the required endpoints are present.
type TenantDiscoveryResponse struct {
	OAuthResponseBase

	AuthorizationEndpoint string `json:"authorization_endpoint"`
	TokenEndpoint         string `json:"token_endpoint"`
	Issuer                string `json:"issuer"`

	// AdditionalFields receives any JSON keys without a dedicated field.
	AdditionalFields map[string]interface{}
}
+
+// Validate validates that the response had the correct values required.
+func (r *TenantDiscoveryResponse) Validate() error {
+ switch "" {
+ case r.AuthorizationEndpoint:
+ return errors.New("TenantDiscoveryResponse: authorize endpoint was not found in the openid configuration")
+ case r.TokenEndpoint:
+ return errors.New("TenantDiscoveryResponse: token endpoint was not found in the openid configuration")
+ case r.Issuer:
+ return errors.New("TenantDiscoveryResponse: issuer was not found in the openid configuration")
+ }
+ return nil
+}
+
// InstanceDiscoveryMetadata describes one authority in an instance discovery
// response: its preferred endpoints and known host aliases.
type InstanceDiscoveryMetadata struct {
	PreferredNetwork string   `json:"preferred_network"`
	PreferredCache   string   `json:"preferred_cache"`
	Aliases          []string `json:"aliases"`

	// AdditionalFields receives any JSON keys without a dedicated field.
	AdditionalFields map[string]interface{}
}

// InstanceDiscoveryResponse is the payload of the AAD instance discovery
// endpoint (or a locally synthesized equivalent when a region is known).
type InstanceDiscoveryResponse struct {
	TenantDiscoveryEndpoint string                      `json:"tenant_discovery_endpoint"`
	Metadata                []InstanceDiscoveryMetadata `json:"metadata"`

	// AdditionalFields receives any JSON keys without a dedicated field.
	AdditionalFields map[string]interface{}
}
+
//go:generate stringer -type=AuthorizeType

// AuthorizeType represents the type of token flow.
type AuthorizeType int

// These are all the types of token flows.
//
// NOTE(review): AccountByID breaks the AT-prefix naming of its siblings, and
// the generated authorizetype_string.go stops at ATRefreshToken — re-run
// `go generate` so String() covers AccountByID and ATOnBehalfOf.
const (
	ATUnknown AuthorizeType = iota
	ATUsernamePassword
	ATWindowsIntegrated
	ATAuthCode
	ATInteractive
	ATClientCredentials
	ATDeviceCode
	ATRefreshToken
	AccountByID
	ATOnBehalfOf
)
+
// These are all authority types
const (
	// AAD is the Azure Active Directory authority type (reported as "MSSTS").
	AAD = "MSSTS"
	// ADFS is the Active Directory Federation Services authority type.
	ADFS = "ADFS"
)
+
// AuthParams represents the parameters used for authorization for token acquisition.
type AuthParams struct {
	// AuthorityInfo describes the authority (host, tenant, type) tokens are requested from.
	AuthorityInfo Info
	// CorrelationID is sent as the client-request-id header to correlate requests.
	CorrelationID string
	// Endpoints holds the discovered authorize/token endpoints.
	Endpoints Endpoints
	// ClientID is the application (client) ID registered with the authority.
	ClientID string
	// Redirecturi is used for auth flows that specify a redirect URI (e.g. local server for interactive auth flow).
	Redirecturi string
	// HomeAccountID identifies the account; used as a cache key for some flows.
	HomeAccountID string
	// Username is the user-name portion for username/password auth flow.
	Username string
	// Password is the password portion for username/password auth flow.
	Password string
	// Scopes is the list of scopes the user consents to.
	Scopes []string
	// AuthorizationType specifies the auth flow being used.
	AuthorizationType AuthorizeType
	// State is a random value used to prevent cross-site request forgery attacks.
	State string
	// CodeChallenge is derived from a code verifier and is sent in the auth request.
	CodeChallenge string
	// CodeChallengeMethod describes the method used to create the CodeChallenge.
	CodeChallengeMethod string
	// Prompt specifies the user prompt type during interactive auth.
	Prompt string
	// IsConfidentialClient specifies if it is a confidential client.
	IsConfidentialClient bool
	// SendX5C specifies if x5c claim(public key of the certificate) should be sent to STS.
	SendX5C bool
	// UserAssertion is the access token used to acquire token on behalf of user
	UserAssertion string

	// KnownAuthorityHosts don't require metadata discovery because they're known to the user
	KnownAuthorityHosts []string
}
+
+// NewAuthParams creates an authorization parameters object.
+func NewAuthParams(clientID string, authorityInfo Info) AuthParams {
+ return AuthParams{
+ ClientID: clientID,
+ AuthorityInfo: authorityInfo,
+ CorrelationID: uuid.New().String(),
+ }
+}
+
// Info consists of information about the authority.
type Info struct {
	Host                  string // e.g. "login.microsoftonline.com"
	CanonicalAuthorityURI string // "https://<host>/<tenant>/"
	AuthorityType         string // AAD ("MSSTS") or ADFS
	UserRealmURIPrefix    string // "https://<host>/common/userrealm/"
	ValidateAuthority     bool
	Tenant                string // first path segment of the authority URI
	Region                string // fixed region, autoDetectRegion sentinel, or ""
}
+
+func firstPathSegment(u *url.URL) (string, error) {
+ pathParts := strings.Split(u.EscapedPath(), "/")
+ if len(pathParts) >= 2 {
+ return pathParts[1], nil
+ }
+
+ return "", errors.New("authority does not have two segments")
+}
+
+// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided.
+func NewInfoFromAuthorityURI(authorityURI string, validateAuthority bool) (Info, error) {
+ authorityURI = strings.ToLower(authorityURI)
+ var authorityType string
+ u, err := url.Parse(authorityURI)
+ if err != nil {
+ return Info{}, fmt.Errorf("authorityURI passed could not be parsed: %w", err)
+ }
+ if u.Scheme != "https" {
+ return Info{}, fmt.Errorf("authorityURI(%s) must have scheme https", authorityURI)
+ }
+
+ tenant, err := firstPathSegment(u)
+ if tenant == "adfs" {
+ authorityType = ADFS
+ } else {
+ authorityType = AAD
+ }
+
+ if err != nil {
+ return Info{}, err
+ }
+
+ return Info{
+ Host: u.Hostname(),
+ CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Hostname(), tenant),
+ AuthorityType: authorityType,
+ UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()),
+ ValidateAuthority: validateAuthority,
+ Tenant: tenant,
+ }, nil
+}
+
// Endpoints consists of the endpoints from the tenant discovery response.
type Endpoints struct {
	AuthorizationEndpoint string
	TokenEndpoint         string
	selfSignedJwtAudience string
	authorityHost         string
}

// NewEndpoints creates an Endpoints object.
func NewEndpoints(authorizationEndpoint string, tokenEndpoint string, selfSignedJwtAudience string, authorityHost string) Endpoints {
	return Endpoints{
		AuthorizationEndpoint: authorizationEndpoint,
		TokenEndpoint:         tokenEndpoint,
		selfSignedJwtAudience: selfSignedJwtAudience,
		authorityHost:         authorityHost,
	}
}
+
// UserRealmAccountType refers to the type of user realm.
type UserRealmAccountType string

// These are the different types of user realms.
const (
	Unknown   UserRealmAccountType = ""
	Federated UserRealmAccountType = "Federated"
	Managed   UserRealmAccountType = "Managed"
)

// UserRealm is used for the username password request to determine user type
type UserRealm struct {
	AccountType       UserRealmAccountType `json:"account_type"`
	DomainName        string               `json:"domain_name"`
	CloudInstanceName string               `json:"cloud_instance_name"`
	CloudAudienceURN  string               `json:"cloud_audience_urn"`

	// required if accountType is Federated
	FederationProtocol    string `json:"federation_protocol"`
	FederationMetadataURL string `json:"federation_metadata_url"`

	AdditionalFields map[string]interface{}
}

// validate reports the first missing field required for the realm's account
// type; the federation fields are checked only for Federated accounts.
func (u UserRealm) validate() error {
	if u.AccountType == "" {
		return errors.New("the account type (Federated or Managed) is missing")
	}
	if u.DomainName == "" {
		return errors.New("domain name of user realm is missing")
	}
	if u.CloudInstanceName == "" {
		return errors.New("cloud instance name of user realm is missing")
	}
	if u.CloudAudienceURN == "" {
		return errors.New("cloud Instance URN is missing")
	}

	if u.AccountType == Federated {
		if u.FederationProtocol == "" {
			return errors.New("federation protocol of user realm is missing")
		}
		if u.FederationMetadataURL == "" {
			return errors.New("federation metadata URL of user realm is missing")
		}
	}
	return nil
}
+
// Client represents the REST calls to authority backends: user-realm lookup,
// tenant (OpenID) discovery and instance discovery.
type Client struct {
	// Comm provides the HTTP transport client.
	Comm jsonCaller // *comm.Client
}
+
+func (c Client) UserRealm(ctx context.Context, authParams AuthParams) (UserRealm, error) {
+ endpoint := fmt.Sprintf("https://%s/common/UserRealm/%s", authParams.Endpoints.authorityHost, url.PathEscape(authParams.Username))
+ qv := url.Values{
+ "api-version": []string{"1.0"},
+ }
+
+ resp := UserRealm{}
+ err := c.Comm.JSONCall(
+ ctx,
+ endpoint,
+ http.Header{"client-request-id": []string{authParams.CorrelationID}},
+ qv,
+ nil,
+ &resp,
+ )
+ if err != nil {
+ return resp, err
+ }
+
+ return resp, resp.validate()
+}
+
+func (c Client) GetTenantDiscoveryResponse(ctx context.Context, openIDConfigurationEndpoint string) (TenantDiscoveryResponse, error) {
+ resp := TenantDiscoveryResponse{}
+ err := c.Comm.JSONCall(
+ ctx,
+ openIDConfigurationEndpoint,
+ http.Header{},
+ nil,
+ nil,
+ &resp,
+ )
+
+ return resp, err
+}
+
// AADInstanceDiscovery resolves instance metadata for an AAD authority. When a
// region is configured (or auto-detected via IMDS) the response is synthesized
// locally for the regional endpoint; otherwise the public instance discovery
// endpoint is queried over the network.
func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (InstanceDiscoveryResponse, error) {
	region := ""
	var err error
	resp := InstanceDiscoveryResponse{}
	// An explicit region wins; the autoDetectRegion sentinel triggers IMDS lookup.
	if authorityInfo.Region != "" && authorityInfo.Region != autoDetectRegion {
		region = authorityInfo.Region
	} else if authorityInfo.Region == autoDetectRegion {
		region = detectRegion(ctx)
	}
	if region != "" {
		environment := authorityInfo.Host
		switch environment {
		// These global hosts are rewritten to the regional alias host.
		case "login.microsoft.com", "login.windows.net", "sts.windows.net", defaultHost:
			environment = "r." + defaultHost
		}
		resp.TenantDiscoveryEndpoint = fmt.Sprintf(tenantDiscoveryEndpointWithRegion, region, environment, authorityInfo.Tenant)
		metadata := InstanceDiscoveryMetadata{
			PreferredNetwork: fmt.Sprintf("%v.%v", region, authorityInfo.Host),
			PreferredCache:   authorityInfo.Host,
			Aliases:          []string{fmt.Sprintf("%v.%v", region, authorityInfo.Host), authorityInfo.Host},
		}
		resp.Metadata = []InstanceDiscoveryMetadata{metadata}
	} else {
		// No region: ask the well-known discovery endpoint. Untrusted hosts are
		// routed through the default host for the discovery query itself.
		qv := url.Values{}
		qv.Set("api-version", "1.1")
		qv.Set("authorization_endpoint", fmt.Sprintf(authorizationEndpoint, authorityInfo.Host, authorityInfo.Tenant))

		discoveryHost := defaultHost
		if TrustedHost(authorityInfo.Host) {
			discoveryHost = authorityInfo.Host
		}

		endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost)
		err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp)
	}
	return resp, err
}
+
+func detectRegion(ctx context.Context) string {
+ region := os.Getenv(regionName)
+ if region != "" {
+ region = strings.ReplaceAll(region, " ", "")
+ return strings.ToLower(region)
+ }
+ // HTTP call to IMDS endpoint to get region
+ // Refer : https://identitydivision.visualstudio.com/DevEx/_git/AuthLibrariesApiReview?path=%2FPinAuthToRegion%2FAAD%20SDK%20Proposal%20to%20Pin%20Auth%20to%20region.md&_a=preview&version=GBdev
+ // Set a 2 second timeout for this http client which only does calls to IMDS endpoint
+ client := http.Client{
+ Timeout: time.Duration(2 * time.Second),
+ }
+ req, _ := http.NewRequest("GET", imdsEndpoint, nil)
+ req.Header.Set("Metadata", "true")
+ resp, err := client.Do(req)
+ // If the request times out or there is an error, it is retried once
+ if err != nil || resp.StatusCode != 200 {
+ resp, err = client.Do(req)
+ if err != nil || resp.StatusCode != 200 {
+ return ""
+ }
+ }
+ defer resp.Body.Close()
+ response, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return ""
+ }
+ return string(response)
+}
+
+func (a *AuthParams) CacheKey(isAppCache bool) string {
+ if a.AuthorizationType == ATOnBehalfOf {
+ return a.AssertionHash()
+ }
+ if a.AuthorizationType == ATClientCredentials || isAppCache {
+ return a.AppKey()
+ }
+ if a.AuthorizationType == ATRefreshToken || a.AuthorizationType == AccountByID {
+ return a.HomeAccountID
+ }
+ return ""
+}
+func (a *AuthParams) AssertionHash() string {
+ hasher := sha256.New()
+ // Per documentation this never returns an error : https://pkg.go.dev/hash#pkg-types
+ _, _ = hasher.Write([]byte(a.UserAssertion))
+ sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
+ return sha
+}
+
+func (a *AuthParams) AppKey() string {
+ if a.AuthorityInfo.Tenant != "" {
+ return fmt.Sprintf("%s_%s_AppTokenCache", a.ClientID, a.AuthorityInfo.Tenant)
+ }
+ return fmt.Sprintf("%s__AppTokenCache", a.ClientID)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go
new file mode 100644
index 00000000000..10039773b06
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go
@@ -0,0 +1,30 @@
// Code generated by "stringer -type=AuthorizeType"; DO NOT EDIT.

package authority

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ATUnknown-0]
	_ = x[ATUsernamePassword-1]
	_ = x[ATWindowsIntegrated-2]
	_ = x[ATAuthCode-3]
	_ = x[ATInteractive-4]
	_ = x[ATClientCredentials-5]
	_ = x[ATDeviceCode-6]
	_ = x[ATRefreshToken-7]
}

const _AuthorizeType_name = "ATUnknownATUsernamePasswordATWindowsIntegratedATAuthCodeATInteractiveATClientCredentialsATDeviceCodeATRefreshToken"

var _AuthorizeType_index = [...]uint8{0, 9, 27, 46, 56, 69, 88, 100, 114}

// String returns the constant's name, or "AuthorizeType(n)" for values outside
// the generated range.
//
// NOTE(review): this generated file is stale — AccountByID (8) and ATOnBehalfOf
// (9) exist in authority.go but are absent from the name table, so they render
// as "AuthorizeType(8)"/"AuthorizeType(9)" until stringer is re-run.
func (i AuthorizeType) String() string {
	if i < 0 || i >= AuthorizeType(len(_AuthorizeType_index)-1) {
		return "AuthorizeType(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _AuthorizeType_name[_AuthorizeType_index[i]:_AuthorizeType_index[i+1]]
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
new file mode 100644
index 00000000000..7d9ec7cd374
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
@@ -0,0 +1,320 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package comm provides helpers for communicating with HTTP backends.
+package comm
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+ customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version"
+ "github.com/google/uuid"
+)
+
+// HTTPClient represents an HTTP client.
+// It's usually an *http.Client from the standard library.
+type HTTPClient interface {
+ // Do sends an HTTP request and returns an HTTP response.
+ Do(req *http.Request) (*http.Response, error)
+
+ // CloseIdleConnections closes any idle connections in a "keep-alive" state.
+ CloseIdleConnections()
+}
+
+// Client provides a wrapper to our *http.Client that handles compression and serialization needs.
+type Client struct {
+ client HTTPClient
+}
+
+// New returns a new Client object.
+func New(httpClient HTTPClient) *Client {
+ if httpClient == nil {
+ panic("http.Client cannot == nil")
+ }
+
+ return &Client{client: httpClient}
+}
+
+// JSONCall connects to the REST endpoint passing the HTTP query values, headers and JSON conversion
+// of body in the HTTP body. It automatically handles compression and decompression with gzip. The response is JSON
+// unmarshalled into resp. resp must be a pointer to a struct. If the body struct contains a field called
+// "AdditionalFields" we use a custom marshal/unmarshal engine.
+func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error {
+ if qv == nil {
+ qv = url.Values{}
+ }
+
+ v := reflect.ValueOf(resp)
+ if err := c.checkResp(v); err != nil {
+ return err
+ }
+
+ // Choose a JSON marshal/unmarshal depending on if we have AdditionalFields attribute.
+ var marshal = json.Marshal
+ var unmarshal = json.Unmarshal
+ if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok {
+ marshal = customJSON.Marshal
+ unmarshal = customJSON.Unmarshal
+ }
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+ }
+ u.RawQuery = qv.Encode()
+
+ addStdHeaders(headers)
+
+ req := &http.Request{Method: http.MethodGet, URL: u, Header: headers}
+
+ if body != nil {
+ // Note: In case your wondering why we are not gzip encoding....
+ // I'm not sure if these various services support gzip on send.
+ headers.Add("Content-Type", "application/json; charset=utf-8")
+ data, err := marshal(body)
+ if err != nil {
+ return fmt.Errorf("bug: conn.Call(): could not marshal the body object: %w", err)
+ }
+ req.Body = io.NopCloser(bytes.NewBuffer(data))
+ req.Method = http.MethodPost
+ }
+
+ data, err := c.do(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ if resp != nil {
+ if err := unmarshal(data, resp); err != nil {
+ return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))
+ }
+ }
+ return nil
+}
+
+// XMLCall connects to an endpoint and decodes the XML response into resp. This is used when
+// sending application/xml . If sending XML via SOAP, use SOAPCall().
+func (c *Client) XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error {
+ if err := c.checkResp(reflect.ValueOf(resp)); err != nil {
+ return err
+ }
+
+ if qv == nil {
+ qv = url.Values{}
+ }
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+ }
+ u.RawQuery = qv.Encode()
+
+ headers.Set("Content-Type", "application/xml; charset=utf-8") // This was not set in he original Mex(), but...
+ addStdHeaders(headers)
+
+ return c.xmlCall(ctx, u, headers, "", resp)
+}
+
+// SOAPCall returns the SOAP message given an endpoint, action, body of the request and the response object to marshal into.
+func (c *Client) SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error {
+ if body == "" {
+ return fmt.Errorf("cannot make a SOAP call with body set to empty string")
+ }
+
+ if err := c.checkResp(reflect.ValueOf(resp)); err != nil {
+ return err
+ }
+
+ if qv == nil {
+ qv = url.Values{}
+ }
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+ }
+ u.RawQuery = qv.Encode()
+
+ headers.Set("Content-Type", "application/soap+xml; charset=utf-8")
+ headers.Set("SOAPAction", action)
+ addStdHeaders(headers)
+
+ return c.xmlCall(ctx, u, headers, body, resp)
+}
+
+// xmlCall sends an XML in body and decodes into resp. This simply does the transport and relies on
+// an upper level call to set things such as SOAP parameters and Content-Type, if required.
+func (c *Client) xmlCall(ctx context.Context, u *url.URL, headers http.Header, body string, resp interface{}) error {
+ req := &http.Request{Method: http.MethodGet, URL: u, Header: headers}
+
+ if len(body) > 0 {
+ req.Method = http.MethodPost
+ req.Body = io.NopCloser(strings.NewReader(body))
+ }
+
+ data, err := c.do(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ return xml.Unmarshal(data, resp)
+}
+
+// URLFormCall is used to make a call where we need to send application/x-www-form-urlencoded data
+// to the backend and receive JSON back. qv will be encoded into the request body.
+func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error {
+ if len(qv) == 0 {
+ return fmt.Errorf("URLFormCall() requires qv to have non-zero length")
+ }
+
+ if err := c.checkResp(reflect.ValueOf(resp)); err != nil {
+ return err
+ }
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+ }
+
+ headers := http.Header{}
+ headers.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+ addStdHeaders(headers)
+
+ enc := qv.Encode()
+
+ req := &http.Request{
+ Method: http.MethodPost,
+ URL: u,
+ Header: headers,
+ ContentLength: int64(len(enc)),
+ Body: io.NopCloser(strings.NewReader(enc)),
+ GetBody: func() (io.ReadCloser, error) {
+ return io.NopCloser(strings.NewReader(enc)), nil
+ },
+ }
+
+ data, err := c.do(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ v := reflect.ValueOf(resp)
+ if err := c.checkResp(v); err != nil {
+ return err
+ }
+
+ var unmarshal = json.Unmarshal
+ if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok {
+ unmarshal = customJSON.Unmarshal
+ }
+ if resp != nil {
+ if err := unmarshal(data, resp); err != nil {
+ return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))
+ }
+ }
+ return nil
+}
+
+// do makes the HTTP call to the server and returns the contents of the body.
+func (c *Client) do(ctx context.Context, req *http.Request) ([]byte, error) {
+ if _, ok := ctx.Deadline(); !ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, 30*time.Second)
+ defer cancel()
+ }
+ req = req.WithContext(ctx)
+
+ reply, err := c.client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("server response error:\n %w", err)
+ }
+ defer reply.Body.Close()
+
+ data, err := c.readBody(reply)
+ if err != nil {
+ return nil, fmt.Errorf("could not read the body of an HTTP Response: %w", err)
+ }
+ reply.Body = io.NopCloser(bytes.NewBuffer(data))
+
+ // NOTE: This doesn't happen immediately after the call so that we can get an error message
+ // from the server and include it in our error.
+ switch reply.StatusCode {
+ case 200, 201:
+ default:
+ sd := strings.TrimSpace(string(data))
+ if sd != "" {
+ // We probably have the error in the body.
+ return nil, errors.CallErr{
+ Req: req,
+ Resp: reply,
+ Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, sd),
+ }
+ }
+ return nil, errors.CallErr{
+ Req: req,
+ Resp: reply,
+ Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, reply.StatusCode),
+ }
+ }
+
+ return data, nil
+}
+
+// checkResp checks a response object o make sure it is a pointer to a struct.
+func (c *Client) checkResp(v reflect.Value) error {
+ if v.Kind() != reflect.Ptr {
+ return fmt.Errorf("bug: resp argument must a *struct, was %T", v.Interface())
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return fmt.Errorf("bug: resp argument must be a *struct, was %T", v.Interface())
+ }
+ return nil
+}
+
+// readBody reads the body out of an *http.Response. It supports gzip encoded responses.
+func (c *Client) readBody(resp *http.Response) ([]byte, error) {
+ var reader io.Reader = resp.Body
+ switch resp.Header.Get("Content-Encoding") {
+ case "":
+ // Do nothing
+ case "gzip":
+ reader = gzipDecompress(resp.Body)
+ default:
+ return nil, fmt.Errorf("bug: comm.Client.JSONCall(): content was send with unsupported content-encoding %s", resp.Header.Get("Content-Encoding"))
+ }
+ return io.ReadAll(reader)
+}
+
+var testID string
+
+// addStdHeaders adds the standard headers we use on all calls.
+func addStdHeaders(headers http.Header) http.Header {
+ headers.Set("Accept-Encoding", "gzip")
+ // So that I can have a static id for tests.
+ if testID != "" {
+ headers.Set("client-request-id", testID)
+ headers.Set("Return-Client-Request-Id", "false")
+ } else {
+ headers.Set("client-request-id", uuid.New().String())
+ headers.Set("Return-Client-Request-Id", "false")
+ }
+ headers.Set("x-client-sku", "MSAL.Go")
+ headers.Set("x-client-os", runtime.GOOS)
+ headers.Set("x-client-cpu", runtime.GOARCH)
+ headers.Set("x-client-ver", version.Version)
+ return headers
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go
new file mode 100644
index 00000000000..4d3dbfcf0a6
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go
@@ -0,0 +1,33 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package comm
+
+import (
+ "compress/gzip"
+ "io"
+)
+
+func gzipDecompress(r io.Reader) io.Reader {
+ gzipReader, _ := gzip.NewReader(r)
+
+ pipeOut, pipeIn := io.Pipe()
+ go func() {
+ // decompression bomb would have to come from Azure services.
+ // If we want to limit, we should do that in comm.do().
+ _, err := io.Copy(pipeIn, gzipReader) //nolint
+ if err != nil {
+ // don't need the error.
+ pipeIn.CloseWithError(err) //nolint
+ gzipReader.Close()
+ return
+ }
+ if err := gzipReader.Close(); err != nil {
+ // don't need the error.
+ pipeIn.CloseWithError(err) //nolint
+ return
+ }
+ pipeIn.Close()
+ }()
+ return pipeOut
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go
new file mode 100644
index 00000000000..b628f61ac08
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go
@@ -0,0 +1,17 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package grant holds types of grants issued by authorization services.
+package grant
+
+const (
+ Password = "password"
+ JWT = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ SAMLV1 = "urn:ietf:params:oauth:grant-type:saml1_1-bearer"
+ SAMLV2 = "urn:ietf:params:oauth:grant-type:saml2-bearer"
+ DeviceCode = "device_code"
+ AuthCode = "authorization_code"
+ RefreshToken = "refresh_token"
+ ClientCredential = "client_credentials"
+ ClientAssertion = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
+)
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go
new file mode 100644
index 00000000000..1f9c543fa3b
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go
@@ -0,0 +1,56 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package ops provides operations to various backend services using REST clients.
+
+The REST type provides several clients that can be used to communicate to backends.
+Usage is simple:
+
+ rest := ops.New()
+
+ // Creates an authority client and calls the UserRealm() method.
+ userRealm, err := rest.Authority().UserRealm(ctx, authParameters)
+ if err != nil {
+ // Do something
+ }
+*/
+package ops
+
+import (
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust"
+)
+
+// HTTPClient represents an HTTP client.
+// It's usually an *http.Client from the standard library.
+type HTTPClient = comm.HTTPClient
+
+// REST provides REST clients for communicating with various backends used by MSAL.
+type REST struct {
+ client *comm.Client
+}
+
+// New is the constructor for REST.
+func New(httpClient HTTPClient) *REST {
+ return &REST{client: comm.New(httpClient)}
+}
+
+// Authority returns a client for querying information about various authorities.
+func (r *REST) Authority() authority.Client {
+ return authority.Client{Comm: r.client}
+}
+
+// AccessTokens returns a client that can be used to get various access tokens for
+// authorization purposes.
+func (r *REST) AccessTokens() accesstokens.Client {
+ return accesstokens.Client{Comm: r.client}
+}
+
+// WSTrust provides access to various metadata in a WSTrust service. This data can
+// be used to gain tokens based on SAML data using the client provided by AccessTokens().
+func (r *REST) WSTrust() wstrust.Client {
+ return wstrust.Client{Comm: r.client}
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go
new file mode 100644
index 00000000000..a2bb6278ae5
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=endpointType"; DO NOT EDIT.
+
+package defs
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[etUnknown-0]
+ _ = x[etUsernamePassword-1]
+ _ = x[etWindowsTransport-2]
+}
+
+const _endpointType_name = "etUnknownetUsernamePasswordetWindowsTransport"
+
+var _endpointType_index = [...]uint8{0, 9, 27, 45}
+
+func (i endpointType) String() string {
+ if i < 0 || i >= endpointType(len(_endpointType_index)-1) {
+ return "endpointType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _endpointType_name[_endpointType_index[i]:_endpointType_index[i+1]]
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go
new file mode 100644
index 00000000000..6497270028d
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go
@@ -0,0 +1,394 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package defs
+
+import "encoding/xml"
+
+type Definitions struct {
+ XMLName xml.Name `xml:"definitions"`
+ Text string `xml:",chardata"`
+ Name string `xml:"name,attr"`
+ TargetNamespace string `xml:"targetNamespace,attr"`
+ WSDL string `xml:"wsdl,attr"`
+ XSD string `xml:"xsd,attr"`
+ T string `xml:"t,attr"`
+ SOAPENC string `xml:"soapenc,attr"`
+ SOAP string `xml:"soap,attr"`
+ TNS string `xml:"tns,attr"`
+ MSC string `xml:"msc,attr"`
+ WSAM string `xml:"wsam,attr"`
+ SOAP12 string `xml:"soap12,attr"`
+ WSA10 string `xml:"wsa10,attr"`
+ WSA string `xml:"wsa,attr"`
+ WSAW string `xml:"wsaw,attr"`
+ WSX string `xml:"wsx,attr"`
+ WSAP string `xml:"wsap,attr"`
+ WSU string `xml:"wsu,attr"`
+ Trust string `xml:"trust,attr"`
+ WSP string `xml:"wsp,attr"`
+ Policy []Policy `xml:"Policy"`
+ Types Types `xml:"types"`
+ Message []Message `xml:"message"`
+ PortType []PortType `xml:"portType"`
+ Binding []Binding `xml:"binding"`
+ Service Service `xml:"service"`
+}
+
+type Policy struct {
+ Text string `xml:",chardata"`
+ ID string `xml:"Id,attr"`
+ ExactlyOne ExactlyOne `xml:"ExactlyOne"`
+}
+
+type ExactlyOne struct {
+ Text string `xml:",chardata"`
+ All All `xml:"All"`
+}
+
+type All struct {
+ Text string `xml:",chardata"`
+ NegotiateAuthentication NegotiateAuthentication `xml:"NegotiateAuthentication"`
+ TransportBinding TransportBinding `xml:"TransportBinding"`
+ UsingAddressing Text `xml:"UsingAddressing"`
+ EndorsingSupportingTokens EndorsingSupportingTokens `xml:"EndorsingSupportingTokens"`
+ WSS11 WSS11 `xml:"Wss11"`
+ Trust10 Trust10 `xml:"Trust10"`
+ SignedSupportingTokens SignedSupportingTokens `xml:"SignedSupportingTokens"`
+ Trust13 WSTrust13 `xml:"Trust13"`
+ SignedEncryptedSupportingTokens SignedEncryptedSupportingTokens `xml:"SignedEncryptedSupportingTokens"`
+}
+
+type NegotiateAuthentication struct {
+ Text string `xml:",chardata"`
+ HTTP string `xml:"http,attr"`
+ XMLName xml.Name
+}
+
+type TransportBinding struct {
+ Text string `xml:",chardata"`
+ SP string `xml:"sp,attr"`
+ Policy TransportBindingPolicy `xml:"Policy"`
+}
+
+type TransportBindingPolicy struct {
+ Text string `xml:",chardata"`
+ TransportToken TransportToken `xml:"TransportToken"`
+ AlgorithmSuite AlgorithmSuite `xml:"AlgorithmSuite"`
+ Layout Layout `xml:"Layout"`
+ IncludeTimestamp Text `xml:"IncludeTimestamp"`
+}
+
+type TransportToken struct {
+ Text string `xml:",chardata"`
+ Policy TransportTokenPolicy `xml:"Policy"`
+}
+
+type TransportTokenPolicy struct {
+ Text string `xml:",chardata"`
+ HTTPSToken HTTPSToken `xml:"HttpsToken"`
+}
+
+type HTTPSToken struct {
+ Text string `xml:",chardata"`
+ RequireClientCertificate string `xml:"RequireClientCertificate,attr"`
+}
+
+type AlgorithmSuite struct {
+ Text string `xml:",chardata"`
+ Policy AlgorithmSuitePolicy `xml:"Policy"`
+}
+
+type AlgorithmSuitePolicy struct {
+ Text string `xml:",chardata"`
+ Basic256 Text `xml:"Basic256"`
+ Basic128 Text `xml:"Basic128"`
+}
+
+type Layout struct {
+ Text string `xml:",chardata"`
+ Policy LayoutPolicy `xml:"Policy"`
+}
+
+type LayoutPolicy struct {
+ Text string `xml:",chardata"`
+ Strict Text `xml:"Strict"`
+}
+
+type EndorsingSupportingTokens struct {
+ Text string `xml:",chardata"`
+ SP string `xml:"sp,attr"`
+ Policy EndorsingSupportingTokensPolicy `xml:"Policy"`
+}
+
+type EndorsingSupportingTokensPolicy struct {
+ Text string `xml:",chardata"`
+ X509Token X509Token `xml:"X509Token"`
+ RSAToken RSAToken `xml:"RsaToken"`
+ SignedParts SignedParts `xml:"SignedParts"`
+ KerberosToken KerberosToken `xml:"KerberosToken"`
+ IssuedToken IssuedToken `xml:"IssuedToken"`
+ KeyValueToken KeyValueToken `xml:"KeyValueToken"`
+}
+
+type X509Token struct {
+ Text string `xml:",chardata"`
+ IncludeToken string `xml:"IncludeToken,attr"`
+ Policy X509TokenPolicy `xml:"Policy"`
+}
+
+type X509TokenPolicy struct {
+ Text string `xml:",chardata"`
+ RequireThumbprintReference Text `xml:"RequireThumbprintReference"`
+ WSSX509V3Token10 Text `xml:"WssX509V3Token10"`
+}
+
+type RSAToken struct {
+ Text string `xml:",chardata"`
+ IncludeToken string `xml:"IncludeToken,attr"`
+ Optional string `xml:"Optional,attr"`
+ MSSP string `xml:"mssp,attr"`
+}
+
+type SignedParts struct {
+ Text string `xml:",chardata"`
+ Header SignedPartsHeader `xml:"Header"`
+}
+
+type SignedPartsHeader struct {
+ Text string `xml:",chardata"`
+ Name string `xml:"Name,attr"`
+ Namespace string `xml:"Namespace,attr"`
+}
+
+type KerberosToken struct {
+ Text string `xml:",chardata"`
+ IncludeToken string `xml:"IncludeToken,attr"`
+ Policy KerberosTokenPolicy `xml:"Policy"`
+}
+
+type KerberosTokenPolicy struct {
+ Text string `xml:",chardata"`
+ WSSGSSKerberosV5ApReqToken11 Text `xml:"WssGssKerberosV5ApReqToken11"`
+}
+
+type IssuedToken struct {
+ Text string `xml:",chardata"`
+ IncludeToken string `xml:"IncludeToken,attr"`
+ RequestSecurityTokenTemplate RequestSecurityTokenTemplate `xml:"RequestSecurityTokenTemplate"`
+ Policy IssuedTokenPolicy `xml:"Policy"`
+}
+
+type RequestSecurityTokenTemplate struct {
+ Text string `xml:",chardata"`
+ KeyType Text `xml:"KeyType"`
+ EncryptWith Text `xml:"EncryptWith"`
+ SignatureAlgorithm Text `xml:"SignatureAlgorithm"`
+ CanonicalizationAlgorithm Text `xml:"CanonicalizationAlgorithm"`
+ EncryptionAlgorithm Text `xml:"EncryptionAlgorithm"`
+ KeySize Text `xml:"KeySize"`
+ KeyWrapAlgorithm Text `xml:"KeyWrapAlgorithm"`
+}
+
+type IssuedTokenPolicy struct {
+ Text string `xml:",chardata"`
+ RequireInternalReference Text `xml:"RequireInternalReference"`
+}
+
+type KeyValueToken struct {
+ Text string `xml:",chardata"`
+ IncludeToken string `xml:"IncludeToken,attr"`
+ Optional string `xml:"Optional,attr"`
+}
+
+type WSS11 struct {
+ Text string `xml:",chardata"`
+ SP string `xml:"sp,attr"`
+ Policy Wss11Policy `xml:"Policy"`
+}
+
+type Wss11Policy struct {
+ Text string `xml:",chardata"`
+ MustSupportRefThumbprint Text `xml:"MustSupportRefThumbprint"`
+}
+
+type Trust10 struct {
+ Text string `xml:",chardata"`
+ SP string `xml:"sp,attr"`
+ Policy Trust10Policy `xml:"Policy"`
+}
+
+type Trust10Policy struct {
+ Text string `xml:",chardata"`
+ MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"`
+ RequireClientEntropy Text `xml:"RequireClientEntropy"`
+ RequireServerEntropy Text `xml:"RequireServerEntropy"`
+}
+
+type SignedSupportingTokens struct {
+ Text string `xml:",chardata"`
+ SP string `xml:"sp,attr"`
+ Policy SupportingTokensPolicy `xml:"Policy"`
+}
+
+type SupportingTokensPolicy struct {
+ Text string `xml:",chardata"`
+ UsernameToken UsernameToken `xml:"UsernameToken"`
+}
+type UsernameToken struct {
+ Text string `xml:",chardata"`
+ IncludeToken string `xml:"IncludeToken,attr"`
+ Policy UsernameTokenPolicy `xml:"Policy"`
+}
+
+type UsernameTokenPolicy struct {
+ Text string `xml:",chardata"`
+ WSSUsernameToken10 WSSUsernameToken10 `xml:"WssUsernameToken10"`
+}
+
+type WSSUsernameToken10 struct {
+ Text string `xml:",chardata"`
+ XMLName xml.Name
+}
+
+type WSTrust13 struct {
+ Text string `xml:",chardata"`
+ SP string `xml:"sp,attr"`
+ Policy WSTrust13Policy `xml:"Policy"`
+}
+
+type WSTrust13Policy struct {
+ Text string `xml:",chardata"`
+ MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"`
+ RequireClientEntropy Text `xml:"RequireClientEntropy"`
+ RequireServerEntropy Text `xml:"RequireServerEntropy"`
+}
+
+type SignedEncryptedSupportingTokens struct {
+ Text string `xml:",chardata"`
+ SP string `xml:"sp,attr"`
+ Policy SupportingTokensPolicy `xml:"Policy"`
+}
+
+type Types struct {
+ Text string `xml:",chardata"`
+ Schema Schema `xml:"schema"`
+}
+
+type Schema struct {
+ Text string `xml:",chardata"`
+ TargetNamespace string `xml:"targetNamespace,attr"`
+ Import []Import `xml:"import"`
+}
+
+type Import struct {
+ Text string `xml:",chardata"`
+ SchemaLocation string `xml:"schemaLocation,attr"`
+ Namespace string `xml:"namespace,attr"`
+}
+
+type Message struct {
+ Text string `xml:",chardata"`
+ Name string `xml:"name,attr"`
+ Part Part `xml:"part"`
+}
+
+type Part struct {
+ Text string `xml:",chardata"`
+ Name string `xml:"name,attr"`
+ Element string `xml:"element,attr"`
+}
+
+type PortType struct {
+ Text string `xml:",chardata"`
+ Name string `xml:"name,attr"`
+ Operation Operation `xml:"operation"`
+}
+
+type Operation struct {
+ Text string `xml:",chardata"`
+ Name string `xml:"name,attr"`
+ Input OperationIO `xml:"input"`
+ Output OperationIO `xml:"output"`
+}
+
+type OperationIO struct {
+ Text string `xml:",chardata"`
+ Action string `xml:"Action,attr"`
+ Message string `xml:"message,attr"`
+ Body OperationIOBody `xml:"body"`
+}
+
+type OperationIOBody struct {
+ Text string `xml:",chardata"`
+ Use string `xml:"use,attr"`
+}
+
+type Binding struct {
+ Text string `xml:",chardata"`
+ Name string `xml:"name,attr"`
+ Type string `xml:"type,attr"`
+ PolicyReference PolicyReference `xml:"PolicyReference"`
+ Binding DefinitionsBinding `xml:"binding"`
+ Operation BindingOperation `xml:"operation"`
+}
+
+type PolicyReference struct {
+ Text string `xml:",chardata"`
+ URI string `xml:"URI,attr"`
+}
+
+type DefinitionsBinding struct {
+ Text string `xml:",chardata"`
+ Transport string `xml:"transport,attr"`
+}
+
+type BindingOperation struct {
+ Text string `xml:",chardata"`
+ Name string `xml:"name,attr"`
+ Operation BindingOperationOperation `xml:"operation"`
+ Input BindingOperationIO `xml:"input"`
+ Output BindingOperationIO `xml:"output"`
+}
+
+type BindingOperationOperation struct {
+ Text string `xml:",chardata"`
+ SoapAction string `xml:"soapAction,attr"`
+ Style string `xml:"style,attr"`
+}
+
+type BindingOperationIO struct {
+ Text string `xml:",chardata"`
+ Body OperationIOBody `xml:"body"`
+}
+
+type Service struct {
+ Text string `xml:",chardata"`
+ Name string `xml:"name,attr"`
+ Port []Port `xml:"port"`
+}
+
+type Port struct {
+ Text string `xml:",chardata"`
+ Name string `xml:"name,attr"`
+ Binding string `xml:"binding,attr"`
+ Address Address `xml:"address"`
+ EndpointReference PortEndpointReference `xml:"EndpointReference"`
+}
+
+type Address struct {
+ Text string `xml:",chardata"`
+ Location string `xml:"location,attr"`
+}
+
+type PortEndpointReference struct {
+ Text string `xml:",chardata"`
+ Address Text `xml:"Address"`
+ Identity Identity `xml:"Identity"`
+}
+
+type Identity struct {
+ Text string `xml:",chardata"`
+ XMLNS string `xml:"xmlns,attr"`
+ SPN Text `xml:"Spn"`
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go
new file mode 100644
index 00000000000..7d072556577
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go
@@ -0,0 +1,230 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package defs
+
+import "encoding/xml"
+
+// TODO(msal): Someone (and it ain't gonna be me) needs to document these attributes or
+// at the least put a link to RFC.
+
+type SAMLDefinitions struct {
+ XMLName xml.Name `xml:"Envelope"`
+ Text string `xml:",chardata"`
+ S string `xml:"s,attr"`
+ A string `xml:"a,attr"`
+ U string `xml:"u,attr"`
+ Header Header `xml:"Header"`
+ Body Body `xml:"Body"`
+}
+
+type Header struct {
+ Text string `xml:",chardata"`
+ Action Action `xml:"Action"`
+ Security Security `xml:"Security"`
+}
+
+type Action struct {
+ Text string `xml:",chardata"`
+ MustUnderstand string `xml:"mustUnderstand,attr"`
+}
+
+type Security struct {
+ Text string `xml:",chardata"`
+ MustUnderstand string `xml:"mustUnderstand,attr"`
+ O string `xml:"o,attr"`
+ Timestamp Timestamp `xml:"Timestamp"`
+}
+
+type Timestamp struct {
+ Text string `xml:",chardata"`
+ ID string `xml:"Id,attr"`
+ Created Text `xml:"Created"`
+ Expires Text `xml:"Expires"`
+}
+
+type Text struct {
+ Text string `xml:",chardata"`
+}
+
+type Body struct {
+ Text string `xml:",chardata"`
+ RequestSecurityTokenResponseCollection RequestSecurityTokenResponseCollection `xml:"RequestSecurityTokenResponseCollection"`
+}
+
+type RequestSecurityTokenResponseCollection struct {
+ Text string `xml:",chardata"`
+ Trust string `xml:"trust,attr"`
+ RequestSecurityTokenResponse []RequestSecurityTokenResponse `xml:"RequestSecurityTokenResponse"`
+}
+
+type RequestSecurityTokenResponse struct {
+ Text string `xml:",chardata"`
+ Lifetime Lifetime `xml:"Lifetime"`
+ AppliesTo AppliesTo `xml:"AppliesTo"`
+ RequestedSecurityToken RequestedSecurityToken `xml:"RequestedSecurityToken"`
+ RequestedAttachedReference RequestedAttachedReference `xml:"RequestedAttachedReference"`
+ RequestedUnattachedReference RequestedUnattachedReference `xml:"RequestedUnattachedReference"`
+ TokenType Text `xml:"TokenType"`
+ RequestType Text `xml:"RequestType"`
+ KeyType Text `xml:"KeyType"`
+}
+
+type Lifetime struct {
+ Text string `xml:",chardata"`
+ Created WSUTimestamp `xml:"Created"`
+ Expires WSUTimestamp `xml:"Expires"`
+}
+
+type WSUTimestamp struct {
+ Text string `xml:",chardata"`
+ Wsu string `xml:"wsu,attr"`
+}
+
+type AppliesTo struct {
+ Text string `xml:",chardata"`
+ Wsp string `xml:"wsp,attr"`
+ EndpointReference EndpointReference `xml:"EndpointReference"`
+}
+
+type EndpointReference struct {
+ Text string `xml:",chardata"`
+ Wsa string `xml:"wsa,attr"`
+ Address Text `xml:"Address"`
+}
+
+type RequestedSecurityToken struct {
+ Text string `xml:",chardata"`
+ AssertionRawXML string `xml:",innerxml"`
+ Assertion Assertion `xml:"Assertion"`
+}
+
+type Assertion struct {
+ XMLName xml.Name // Normally its `xml:"Assertion"`, but I think they want to capture the xmlns
+ Text string `xml:",chardata"`
+ MajorVersion string `xml:"MajorVersion,attr"`
+ MinorVersion string `xml:"MinorVersion,attr"`
+ AssertionID string `xml:"AssertionID,attr"`
+ Issuer string `xml:"Issuer,attr"`
+ IssueInstant string `xml:"IssueInstant,attr"`
+ Saml string `xml:"saml,attr"`
+ Conditions Conditions `xml:"Conditions"`
+ AttributeStatement AttributeStatement `xml:"AttributeStatement"`
+ AuthenticationStatement AuthenticationStatement `xml:"AuthenticationStatement"`
+ Signature Signature `xml:"Signature"`
+}
+
+type Conditions struct {
+ Text string `xml:",chardata"`
+ NotBefore string `xml:"NotBefore,attr"`
+ NotOnOrAfter string `xml:"NotOnOrAfter,attr"`
+ AudienceRestrictionCondition AudienceRestrictionCondition `xml:"AudienceRestrictionCondition"`
+}
+
+type AudienceRestrictionCondition struct {
+ Text string `xml:",chardata"`
+ Audience Text `xml:"Audience"`
+}
+
+type AttributeStatement struct {
+ Text string `xml:",chardata"`
+ Subject Subject `xml:"Subject"`
+ Attribute []Attribute `xml:"Attribute"`
+}
+
+type Subject struct {
+ Text string `xml:",chardata"`
+ NameIdentifier NameIdentifier `xml:"NameIdentifier"`
+ SubjectConfirmation SubjectConfirmation `xml:"SubjectConfirmation"`
+}
+
+type NameIdentifier struct {
+ Text string `xml:",chardata"`
+ Format string `xml:"Format,attr"`
+}
+
+type SubjectConfirmation struct {
+ Text string `xml:",chardata"`
+ ConfirmationMethod Text `xml:"ConfirmationMethod"`
+}
+
+type Attribute struct {
+ Text string `xml:",chardata"`
+ AttributeName string `xml:"AttributeName,attr"`
+ AttributeNamespace string `xml:"AttributeNamespace,attr"`
+ AttributeValue Text `xml:"AttributeValue"`
+}
+
+type AuthenticationStatement struct {
+ Text string `xml:",chardata"`
+ AuthenticationMethod string `xml:"AuthenticationMethod,attr"`
+ AuthenticationInstant string `xml:"AuthenticationInstant,attr"`
+ Subject Subject `xml:"Subject"`
+}
+
+type Signature struct {
+ Text string `xml:",chardata"`
+ Ds string `xml:"ds,attr"`
+ SignedInfo SignedInfo `xml:"SignedInfo"`
+ SignatureValue Text `xml:"SignatureValue"`
+ KeyInfo KeyInfo `xml:"KeyInfo"`
+}
+
+type SignedInfo struct {
+ Text string `xml:",chardata"`
+ CanonicalizationMethod Method `xml:"CanonicalizationMethod"`
+ SignatureMethod Method `xml:"SignatureMethod"`
+ Reference Reference `xml:"Reference"`
+}
+
+type Method struct {
+ Text string `xml:",chardata"`
+ Algorithm string `xml:"Algorithm,attr"`
+}
+
+type Reference struct {
+ Text string `xml:",chardata"`
+ URI string `xml:"URI,attr"`
+ Transforms Transforms `xml:"Transforms"`
+ DigestMethod Method `xml:"DigestMethod"`
+ DigestValue Text `xml:"DigestValue"`
+}
+
+type Transforms struct {
+ Text string `xml:",chardata"`
+ Transform []Method `xml:"Transform"`
+}
+
+type KeyInfo struct {
+ Text string `xml:",chardata"`
+ Xmlns string `xml:"xmlns,attr"`
+ X509Data X509Data `xml:"X509Data"`
+}
+
+type X509Data struct {
+ Text string `xml:",chardata"`
+ X509Certificate Text `xml:"X509Certificate"`
+}
+
+type RequestedAttachedReference struct {
+ Text string `xml:",chardata"`
+ SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"`
+}
+
+type SecurityTokenReference struct {
+ Text string `xml:",chardata"`
+ TokenType string `xml:"TokenType,attr"`
+ O string `xml:"o,attr"`
+ K string `xml:"k,attr"`
+ KeyIdentifier KeyIdentifier `xml:"KeyIdentifier"`
+}
+
+type KeyIdentifier struct {
+ Text string `xml:",chardata"`
+ ValueType string `xml:"ValueType,attr"`
+}
+
+type RequestedUnattachedReference struct {
+ Text string `xml:",chardata"`
+ SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"`
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go
new file mode 100644
index 00000000000..6fe5efa8a9a
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go
@@ -0,0 +1,25 @@
+// Code generated by "stringer -type=Version"; DO NOT EDIT.
+
+package defs
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[TrustUnknown-0]
+ _ = x[Trust2005-1]
+ _ = x[Trust13-2]
+}
+
+const _Version_name = "TrustUnknownTrust2005Trust13"
+
+var _Version_index = [...]uint8{0, 12, 21, 28}
+
+func (i Version) String() string {
+ if i < 0 || i >= Version(len(_Version_index)-1) {
+ return "Version(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Version_name[_Version_index[i]:_Version_index[i+1]]
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go
new file mode 100644
index 00000000000..8fad5efb5de
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go
@@ -0,0 +1,199 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package defs
+
+import (
+ "encoding/xml"
+ "fmt"
+ "time"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ uuid "github.com/google/uuid"
+)
+
+//go:generate stringer -type=Version
+
+type Version int
+
+const (
+ TrustUnknown Version = iota
+ Trust2005
+ Trust13
+)
+
+// Endpoint represents a WSTrust endpoint.
+type Endpoint struct {
+ // Version is the version of the endpoint.
+ Version Version
+ // URL is the URL of the endpoint.
+ URL string
+}
+
+type wsTrustTokenRequestEnvelope struct {
+ XMLName xml.Name `xml:"s:Envelope"`
+ Text string `xml:",chardata"`
+ S string `xml:"xmlns:s,attr"`
+ Wsa string `xml:"xmlns:wsa,attr"`
+ Wsu string `xml:"xmlns:wsu,attr"`
+ Header struct {
+ Text string `xml:",chardata"`
+ Action struct {
+ Text string `xml:",chardata"`
+ MustUnderstand string `xml:"s:mustUnderstand,attr"`
+ } `xml:"wsa:Action"`
+ MessageID struct {
+ Text string `xml:",chardata"`
+ } `xml:"wsa:messageID"`
+ ReplyTo struct {
+ Text string `xml:",chardata"`
+ Address struct {
+ Text string `xml:",chardata"`
+ } `xml:"wsa:Address"`
+ } `xml:"wsa:ReplyTo"`
+ To struct {
+ Text string `xml:",chardata"`
+ MustUnderstand string `xml:"s:mustUnderstand,attr"`
+ } `xml:"wsa:To"`
+ Security struct {
+ Text string `xml:",chardata"`
+ MustUnderstand string `xml:"s:mustUnderstand,attr"`
+ Wsse string `xml:"xmlns:wsse,attr"`
+ Timestamp struct {
+ Text string `xml:",chardata"`
+ ID string `xml:"wsu:Id,attr"`
+ Created struct {
+ Text string `xml:",chardata"`
+ } `xml:"wsu:Created"`
+ Expires struct {
+ Text string `xml:",chardata"`
+ } `xml:"wsu:Expires"`
+ } `xml:"wsu:Timestamp"`
+ UsernameToken struct {
+ Text string `xml:",chardata"`
+ ID string `xml:"wsu:Id,attr"`
+ Username struct {
+ Text string `xml:",chardata"`
+ } `xml:"wsse:Username"`
+ Password struct {
+ Text string `xml:",chardata"`
+ } `xml:"wsse:Password"`
+ } `xml:"wsse:UsernameToken"`
+ } `xml:"wsse:Security"`
+ } `xml:"s:Header"`
+ Body struct {
+ Text string `xml:",chardata"`
+ RequestSecurityToken struct {
+ Text string `xml:",chardata"`
+ Wst string `xml:"xmlns:wst,attr"`
+ AppliesTo struct {
+ Text string `xml:",chardata"`
+ Wsp string `xml:"xmlns:wsp,attr"`
+ EndpointReference struct {
+ Text string `xml:",chardata"`
+ Address struct {
+ Text string `xml:",chardata"`
+ } `xml:"wsa:Address"`
+ } `xml:"wsa:EndpointReference"`
+ } `xml:"wsp:AppliesTo"`
+ KeyType struct {
+ Text string `xml:",chardata"`
+ } `xml:"wst:KeyType"`
+ RequestType struct {
+ Text string `xml:",chardata"`
+ } `xml:"wst:RequestType"`
+ } `xml:"wst:RequestSecurityToken"`
+ } `xml:"s:Body"`
+}
+
+func buildTimeString(t time.Time) string {
+ // Golang time formats are weird: https://stackoverflow.com/questions/20234104/how-to-format-current-time-using-a-yyyymmddhhmmss-format
+ return t.Format("2006-01-02T15:04:05.000Z")
+}
+
+func (wte *Endpoint) buildTokenRequestMessage(authType authority.AuthorizeType, cloudAudienceURN string, username string, password string) (string, error) {
+ var soapAction string
+ var trustNamespace string
+ var keyType string
+ var requestType string
+
+ createdTime := time.Now().UTC()
+ expiresTime := createdTime.Add(10 * time.Minute)
+
+ switch wte.Version {
+ case Trust2005:
+ soapAction = trust2005Spec
+ trustNamespace = "http://schemas.xmlsoap.org/ws/2005/02/trust"
+ keyType = "http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey"
+ requestType = "http://schemas.xmlsoap.org/ws/2005/02/trust/Issue"
+ case Trust13:
+ soapAction = trust13Spec
+ trustNamespace = "http://docs.oasis-open.org/ws-sx/ws-trust/200512"
+ keyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer"
+ requestType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Issue"
+ default:
+ return "", fmt.Errorf("buildTokenRequestMessage had Version == %q, which is not recognized", wte.Version)
+ }
+
+ var envelope wsTrustTokenRequestEnvelope
+
+ messageUUID := uuid.New()
+
+ envelope.S = "http://www.w3.org/2003/05/soap-envelope"
+ envelope.Wsa = "http://www.w3.org/2005/08/addressing"
+ envelope.Wsu = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"
+
+ envelope.Header.Action.MustUnderstand = "1"
+ envelope.Header.Action.Text = soapAction
+ envelope.Header.MessageID.Text = "urn:uuid:" + messageUUID.String()
+ envelope.Header.ReplyTo.Address.Text = "http://www.w3.org/2005/08/addressing/anonymous"
+ envelope.Header.To.MustUnderstand = "1"
+ envelope.Header.To.Text = wte.URL
+
+ switch authType {
+ case authority.ATUnknown:
+ return "", fmt.Errorf("buildTokenRequestMessage had no authority type(%v)", authType)
+ case authority.ATUsernamePassword:
+ endpointUUID := uuid.New()
+
+ var trustID string
+ if wte.Version == Trust2005 {
+ trustID = "UnPwSecTok2005-" + endpointUUID.String()
+ } else {
+ trustID = "UnPwSecTok13-" + endpointUUID.String()
+ }
+
+ envelope.Header.Security.MustUnderstand = "1"
+ envelope.Header.Security.Wsse = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"
+ envelope.Header.Security.Timestamp.ID = "MSATimeStamp"
+ envelope.Header.Security.Timestamp.Created.Text = buildTimeString(createdTime)
+ envelope.Header.Security.Timestamp.Expires.Text = buildTimeString(expiresTime)
+ envelope.Header.Security.UsernameToken.ID = trustID
+ envelope.Header.Security.UsernameToken.Username.Text = username
+ envelope.Header.Security.UsernameToken.Password.Text = password
+ default:
+ // This is just to note that we don't do anything for other cases.
+ // We aren't missing anything I know of.
+ }
+
+ envelope.Body.RequestSecurityToken.Wst = trustNamespace
+ envelope.Body.RequestSecurityToken.AppliesTo.Wsp = "http://schemas.xmlsoap.org/ws/2004/09/policy"
+ envelope.Body.RequestSecurityToken.AppliesTo.EndpointReference.Address.Text = cloudAudienceURN
+ envelope.Body.RequestSecurityToken.KeyType.Text = keyType
+ envelope.Body.RequestSecurityToken.RequestType.Text = requestType
+
+ output, err := xml.Marshal(envelope)
+ if err != nil {
+ return "", err
+ }
+
+ return string(output), nil
+}
+
+func (wte *Endpoint) BuildTokenRequestMessageWIA(cloudAudienceURN string) (string, error) {
+ return wte.buildTokenRequestMessage(authority.ATWindowsIntegrated, cloudAudienceURN, "", "")
+}
+
+func (wte *Endpoint) BuildTokenRequestMessageUsernamePassword(cloudAudienceURN string, username string, password string) (string, error) {
+ return wte.buildTokenRequestMessage(authority.ATUsernamePassword, cloudAudienceURN, username, password)
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go
new file mode 100644
index 00000000000..e3d19886ebc
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go
@@ -0,0 +1,159 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package defs
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+//go:generate stringer -type=endpointType
+
+type endpointType int
+
+const (
+ etUnknown endpointType = iota
+ etUsernamePassword
+ etWindowsTransport
+)
+
+type wsEndpointData struct {
+ Version Version
+ EndpointType endpointType
+}
+
+const trust13Spec string = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue"
+const trust2005Spec string = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue"
+
+type MexDocument struct {
+ UsernamePasswordEndpoint Endpoint
+ WindowsTransportEndpoint Endpoint
+ policies map[string]endpointType
+ bindings map[string]wsEndpointData
+}
+
+func updateEndpoint(cached *Endpoint, found Endpoint) {
+ if cached == nil || cached.Version == TrustUnknown {
+ *cached = found
+ return
+ }
+ if (*cached).Version == Trust2005 && found.Version == Trust13 {
+ *cached = found
+ return
+ }
+}
+
+// TODO(msal): Someone needs to write tests for everything below.
+
+// NewFromDef creates a new MexDocument.
+func NewFromDef(defs Definitions) (MexDocument, error) {
+ policies, err := policies(defs)
+ if err != nil {
+ return MexDocument{}, err
+ }
+
+ bindings, err := bindings(defs, policies)
+ if err != nil {
+ return MexDocument{}, err
+ }
+
+ userPass, windows, err := endpoints(defs, bindings)
+ if err != nil {
+ return MexDocument{}, err
+ }
+
+ return MexDocument{
+ UsernamePasswordEndpoint: userPass,
+ WindowsTransportEndpoint: windows,
+ policies: policies,
+ bindings: bindings,
+ }, nil
+}
+
+func policies(defs Definitions) (map[string]endpointType, error) {
+ policies := make(map[string]endpointType, len(defs.Policy))
+
+ for _, policy := range defs.Policy {
+ if policy.ExactlyOne.All.NegotiateAuthentication.XMLName.Local != "" {
+ if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" {
+ policies["#"+policy.ID] = etWindowsTransport
+ }
+ }
+
+ if policy.ExactlyOne.All.SignedEncryptedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" {
+ if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" {
+ policies["#"+policy.ID] = etUsernamePassword
+ }
+ }
+ if policy.ExactlyOne.All.SignedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" {
+ if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" {
+ policies["#"+policy.ID] = etUsernamePassword
+ }
+ }
+ }
+
+ if len(policies) == 0 {
+ return policies, errors.New("no policies for mex document")
+ }
+
+ return policies, nil
+}
+
+func bindings(defs Definitions, policies map[string]endpointType) (map[string]wsEndpointData, error) {
+ bindings := make(map[string]wsEndpointData, len(defs.Binding))
+
+ for _, binding := range defs.Binding {
+ policyName := binding.PolicyReference.URI
+ transport := binding.Binding.Transport
+
+ if transport == "http://schemas.xmlsoap.org/soap/http" {
+ if policy, ok := policies[policyName]; ok {
+ bindingName := binding.Name
+ specVersion := binding.Operation.Operation.SoapAction
+
+ if specVersion == trust13Spec {
+ bindings[bindingName] = wsEndpointData{Trust13, policy}
+ } else if specVersion == trust2005Spec {
+ bindings[bindingName] = wsEndpointData{Trust2005, policy}
+ } else {
+ return nil, errors.New("found unknown spec version in mex document")
+ }
+ }
+ }
+ }
+ return bindings, nil
+}
+
+func endpoints(defs Definitions, bindings map[string]wsEndpointData) (userPass, windows Endpoint, err error) {
+ for _, port := range defs.Service.Port {
+ bindingName := port.Binding
+
+ index := strings.Index(bindingName, ":")
+ if index != -1 {
+ bindingName = bindingName[index+1:]
+ }
+
+ if binding, ok := bindings[bindingName]; ok {
+ url := strings.TrimSpace(port.EndpointReference.Address.Text)
+ if url == "" {
+ return Endpoint{}, Endpoint{}, fmt.Errorf("MexDocument cannot have blank URL endpoint")
+ }
+ if binding.Version == TrustUnknown {
+ return Endpoint{}, Endpoint{}, fmt.Errorf("endpoint version unknown")
+ }
+ endpoint := Endpoint{Version: binding.Version, URL: url}
+
+ switch binding.EndpointType {
+ case etUsernamePassword:
+ updateEndpoint(&userPass, endpoint)
+ case etWindowsTransport:
+ updateEndpoint(&windows, endpoint)
+ default:
+ return Endpoint{}, Endpoint{}, errors.New("found unknown port type in MEX document")
+ }
+ }
+ }
+ return userPass, windows, nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go
new file mode 100644
index 00000000000..47cd4c692d6
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go
@@ -0,0 +1,136 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package wstrust provides a client for communicating with a WSTrust (https://en.wikipedia.org/wiki/WS-Trust#:~:text=WS%2DTrust%20is%20a%20WS,in%20a%20secure%20message%20exchange.)
+for the purposes of extracting metadata from the service. This data can be used to acquire
+tokens using the accesstokens.Client.GetAccessTokenFromSamlGrant() call.
+*/
+package wstrust
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs"
+)
+
+type xmlCaller interface {
+ XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error
+ SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error
+}
+
+type SamlTokenInfo struct {
+ AssertionType string // Should be either constants SAMLV1Grant or SAMLV2Grant.
+ Assertion string
+}
+
+// Client represents the REST calls to get tokens from token generator backends.
+type Client struct {
+ // Comm provides the HTTP transport client.
+ Comm xmlCaller
+}
+
+// TODO(msal): This allows me to call Mex without having a real Def file on line 45.
+// This would fail because policies() would not find a policy. This is easy enough to
+// fix in test data, but.... Definitions is defined with built in structs. That needs
+// to be pulled apart and until then I have this hack in.
+var newFromDef = defs.NewFromDef
+
+// Mex provides metadata about a wstrust service.
+func (c Client) Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) {
+ resp := defs.Definitions{}
+ err := c.Comm.XMLCall(
+ ctx,
+ federationMetadataURL,
+ http.Header{},
+ nil,
+ &resp,
+ )
+ if err != nil {
+ return defs.MexDocument{}, err
+ }
+
+ return newFromDef(resp)
+}
+
+const (
+ SoapActionDefault = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue"
+
+ // Note: Commented out because this action is not supported. It was in the original code
+ // but only used in a switch where it errored. Since there was only one value, a default
+ // worked better. However, buildTokenRequestMessage() had 2005 support. I'm not actually
+	// sure what's going on here. It looks like we have half support. For now this is here just
+ // for documentation purposes in case we are going to add support.
+ //
+ // SoapActionWSTrust2005 = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue"
+)
+
+// SAMLTokenInfo provides SAML information that is used to generate a SAML token.
+func (c Client) SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (SamlTokenInfo, error) {
+ var wsTrustRequestMessage string
+ var err error
+
+ switch authParameters.AuthorizationType {
+ case authority.ATWindowsIntegrated:
+ wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageWIA(cloudAudienceURN)
+ if err != nil {
+ return SamlTokenInfo{}, err
+ }
+ case authority.ATUsernamePassword:
+ wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageUsernamePassword(
+ cloudAudienceURN, authParameters.Username, authParameters.Password)
+ if err != nil {
+ return SamlTokenInfo{}, err
+ }
+ default:
+ return SamlTokenInfo{}, fmt.Errorf("unknown auth type %v", authParameters.AuthorizationType)
+ }
+
+ var soapAction string
+ switch endpoint.Version {
+ case defs.Trust13:
+ soapAction = SoapActionDefault
+ case defs.Trust2005:
+ return SamlTokenInfo{}, errors.New("WS Trust 2005 support is not implemented")
+ default:
+ return SamlTokenInfo{}, fmt.Errorf("the SOAP endpoint for a wstrust call had an invalid version: %v", endpoint.Version)
+ }
+
+ resp := defs.SAMLDefinitions{}
+ err = c.Comm.SOAPCall(ctx, endpoint.URL, soapAction, http.Header{}, nil, wsTrustRequestMessage, &resp)
+ if err != nil {
+ return SamlTokenInfo{}, err
+ }
+
+ return c.samlAssertion(resp)
+}
+
+const (
+ samlv1Assertion = "urn:oasis:names:tc:SAML:1.0:assertion"
+ samlv2Assertion = "urn:oasis:names:tc:SAML:2.0:assertion"
+)
+
+func (c Client) samlAssertion(def defs.SAMLDefinitions) (SamlTokenInfo, error) {
+ for _, tokenResponse := range def.Body.RequestSecurityTokenResponseCollection.RequestSecurityTokenResponse {
+ token := tokenResponse.RequestedSecurityToken
+ if token.Assertion.XMLName.Local != "" {
+ assertion := token.AssertionRawXML
+
+ samlVersion := token.Assertion.Saml
+ switch samlVersion {
+ case samlv1Assertion:
+ return SamlTokenInfo{AssertionType: grant.SAMLV1, Assertion: assertion}, nil
+ case samlv2Assertion:
+ return SamlTokenInfo{AssertionType: grant.SAMLV2, Assertion: assertion}, nil
+ }
+ return SamlTokenInfo{}, fmt.Errorf("couldn't parse SAML assertion, version unknown: %q", samlVersion)
+ }
+ }
+ return SamlTokenInfo{}, errors.New("unknown WS-Trust version")
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
new file mode 100644
index 00000000000..893ef4814f7
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
@@ -0,0 +1,152 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// TODO(msal): Write some tests. The original code this came from didn't have tests and I'm too
+// tired at this point to do it. It, like many other *Manager code I found was broken because
+// they didn't have mutex protection.
+
+package oauth
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+)
+
+// ADFS is an active directory federation service authority type.
+const ADFS = "ADFS"
+
+type cacheEntry struct {
+ Endpoints authority.Endpoints
+ ValidForDomainsInList map[string]bool
+}
+
+func createcacheEntry(endpoints authority.Endpoints) cacheEntry {
+ return cacheEntry{endpoints, map[string]bool{}}
+}
+
+// AuthorityEndpoint retrieves endpoints from an authority for auth and token acquisition.
+type authorityEndpoint struct {
+ rest *ops.REST
+
+ mu sync.Mutex
+ cache map[string]cacheEntry
+}
+
+// newAuthorityEndpoint is the constructor for AuthorityEndpoint.
+func newAuthorityEndpoint(rest *ops.REST) *authorityEndpoint {
+ m := &authorityEndpoint{rest: rest, cache: map[string]cacheEntry{}}
+ return m
+}
+
+// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance
+func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) {
+ if authorityInfo.AuthorityType == ADFS && len(userPrincipalName) == 0 {
+ return authority.Endpoints{}, errors.New("UPN required for authority validation for ADFS")
+ }
+
+ if endpoints, found := m.cachedEndpoints(authorityInfo, userPrincipalName); found {
+ return endpoints, nil
+ }
+
+ endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName)
+ if err != nil {
+ return authority.Endpoints{}, err
+ }
+
+ resp, err := m.rest.Authority().GetTenantDiscoveryResponse(ctx, endpoint)
+ if err != nil {
+ return authority.Endpoints{}, err
+ }
+ if err := resp.Validate(); err != nil {
+ return authority.Endpoints{}, fmt.Errorf("ResolveEndpoints(): %w", err)
+ }
+
+ tenant := authorityInfo.Tenant
+
+ endpoints := authority.NewEndpoints(
+ strings.Replace(resp.AuthorizationEndpoint, "{tenant}", tenant, -1),
+ strings.Replace(resp.TokenEndpoint, "{tenant}", tenant, -1),
+ strings.Replace(resp.Issuer, "{tenant}", tenant, -1),
+ authorityInfo.Host)
+
+ m.addCachedEndpoints(authorityInfo, userPrincipalName, endpoints)
+
+ return endpoints, nil
+}
+
+// cachedEndpoints returns the cached endpoints if they exist. If not, it returns false.
+func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, bool) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok {
+ if authorityInfo.AuthorityType == ADFS {
+ domain, err := adfsDomainFromUpn(userPrincipalName)
+ if err == nil {
+ if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok {
+ return cacheEntry.Endpoints, true
+ }
+ }
+ }
+ return cacheEntry.Endpoints, true
+ }
+ return authority.Endpoints{}, false
+}
+
+func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, userPrincipalName string, endpoints authority.Endpoints) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+
+ updatedCacheEntry := createcacheEntry(endpoints)
+
+ if authorityInfo.AuthorityType == ADFS {
+ // Since we're here, we've made a call to the backend. We want to ensure we're caching
+ // the latest values from the server.
+ if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok {
+ for k := range cacheEntry.ValidForDomainsInList {
+ updatedCacheEntry.ValidForDomainsInList[k] = true
+ }
+ }
+ domain, err := adfsDomainFromUpn(userPrincipalName)
+ if err == nil {
+ updatedCacheEntry.ValidForDomainsInList[domain] = true
+ }
+ }
+
+ m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry
+}
+
+func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) {
+ if authorityInfo.Tenant == "adfs" {
+ return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil
+ } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) {
+ resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo)
+ if err != nil {
+ return "", err
+ }
+ return resp.TenantDiscoveryEndpoint, nil
+ } else if authorityInfo.Region != "" {
+ resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo)
+ if err != nil {
+ return "", err
+ }
+ return resp.TenantDiscoveryEndpoint, nil
+
+ }
+
+ return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil
+}
+
+func adfsDomainFromUpn(userPrincipalName string) (string, error) {
+ parts := strings.Split(userPrincipalName, "@")
+ if len(parts) < 2 {
+ return "", errors.New("no @ present in user principal name")
+ }
+ return parts[1], nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go
new file mode 100644
index 00000000000..f7e12a71bf3
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go
@@ -0,0 +1,71 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package shared
+
+import (
+ "net/http"
+ "reflect"
+ "strings"
+)
+
+const (
+ // CacheKeySeparator is used in creating the keys of the cache.
+ CacheKeySeparator = "-"
+)
+
+type Account struct {
+ HomeAccountID string `json:"home_account_id,omitempty"`
+ Environment string `json:"environment,omitempty"`
+ Realm string `json:"realm,omitempty"`
+ LocalAccountID string `json:"local_account_id,omitempty"`
+ AuthorityType string `json:"authority_type,omitempty"`
+ PreferredUsername string `json:"username,omitempty"`
+ GivenName string `json:"given_name,omitempty"`
+ FamilyName string `json:"family_name,omitempty"`
+ MiddleName string `json:"middle_name,omitempty"`
+ Name string `json:"name,omitempty"`
+ AlternativeID string `json:"alternative_account_id,omitempty"`
+ RawClientInfo string `json:"client_info,omitempty"`
+ UserAssertionHash string `json:"user_assertion_hash,omitempty"`
+
+ AdditionalFields map[string]interface{}
+}
+
+// NewAccount creates an account.
+func NewAccount(homeAccountID, env, realm, localAccountID, authorityType, username string) Account {
+ return Account{
+ HomeAccountID: homeAccountID,
+ Environment: env,
+ Realm: realm,
+ LocalAccountID: localAccountID,
+ AuthorityType: authorityType,
+ PreferredUsername: username,
+ }
+}
+
+// Key creates the key for storing accounts in the cache.
+func (acc Account) Key() string {
+ return strings.Join([]string{acc.HomeAccountID, acc.Environment, acc.Realm}, CacheKeySeparator)
+}
+
+// IsZero checks the zero value of account.
+func (acc Account) IsZero() bool {
+ v := reflect.ValueOf(acc)
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ if !field.IsZero() {
+ switch field.Kind() {
+ case reflect.Map, reflect.Slice:
+ if field.Len() == 0 {
+ continue
+ }
+ }
+ return false
+ }
+ }
+ return true
+}
+
+// DefaultClient is our default shared HTTP client.
+var DefaultClient = &http.Client{}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
new file mode 100644
index 00000000000..5e1ea912912
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
@@ -0,0 +1,8 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package version keeps the version number of the client package.
+package version
+
+// Version is the version of this client package that is communicated to the server.
+const Version = "0.7.0"
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
new file mode 100644
index 00000000000..19118c25a2c
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
@@ -0,0 +1,398 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package public provides a client for authentication of "public" applications. A "public"
+application is defined as an app that runs on client devices (android, ios, windows, linux, ...).
+These devices are "untrusted" and access resources via web APIs that must authenticate.
+*/
+package public
+
+/*
+Design note:
+
+public.Client uses client.Base as an embedded type. client.Base statically assigns its attributes
+during creation. As it doesn't have any pointers in it, anything borrowed from it, such as
+Base.AuthParams is a copy that is free to be manipulated here.
+*/
+
+// TODO(msal): This should have example code for each method on client using Go's example doc framework.
+// base usage details should be included in the package documentation.
+
+import (
+ "context"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/base64"
+ "fmt"
+ "net/url"
+ "strconv"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+ "github.com/google/uuid"
+ "github.com/pkg/browser"
+)
+
+// AuthResult contains the results of one token acquisition operation.
+// For details see https://aka.ms/msal-net-authenticationresult
+type AuthResult = base.AuthResult
+
+type Account = shared.Account
+
+// Options configures the Client's behavior.
+type Options struct {
+ // Accessor controls cache persistence. By default there is no cache persistence.
+ // This can be set with the WithCache() option.
+ Accessor cache.ExportReplace
+
+ // The host of the Azure Active Directory authority. The default is https://login.microsoftonline.com/common.
+ // This can be changed with the WithAuthority() option.
+ Authority string
+
+ // The HTTP client used for making requests.
+ // It defaults to a shared http.Client.
+ HTTPClient ops.HTTPClient
+}
+
+func (p *Options) validate() error {
+ u, err := url.Parse(p.Authority)
+ if err != nil {
+ return fmt.Errorf("Authority options cannot be URL parsed: %w", err)
+ }
+ if u.Scheme != "https" {
+ return fmt.Errorf("Authority(%s) did not start with https://", u.String())
+ }
+ return nil
+}
+
+// Option is an optional argument to the New constructor.
+type Option func(o *Options)
+
+// WithAuthority allows for a custom authority to be set. This must be a valid https url.
+func WithAuthority(authority string) Option {
+ return func(o *Options) {
+ o.Authority = authority
+ }
+}
+
+// WithCache allows you to set some type of cache for storing authentication tokens.
+func WithCache(accessor cache.ExportReplace) Option {
+ return func(o *Options) {
+ o.Accessor = accessor
+ }
+}
+
+// WithHTTPClient allows for a custom HTTP client to be set.
+func WithHTTPClient(httpClient ops.HTTPClient) Option {
+ return func(o *Options) {
+ o.HTTPClient = httpClient
+ }
+}
+
+// Client is a representation of authentication client for public applications as defined in the
+// package doc. For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications.
+type Client struct {
+ base base.Client
+}
+
+// New is the constructor for Client.
+func New(clientID string, options ...Option) (Client, error) {
+ opts := Options{
+ Authority: base.AuthorityPublicCloud,
+ HTTPClient: shared.DefaultClient,
+ }
+
+ for _, o := range options {
+ o(&opts)
+ }
+ if err := opts.validate(); err != nil {
+ return Client{}, err
+ }
+
+ base, err := base.New(clientID, opts.Authority, oauth.New(opts.HTTPClient), base.WithCacheAccessor(opts.Accessor))
+ if err != nil {
+ return Client{}, err
+ }
+ return Client{base}, nil
+}
+
+// CreateAuthCodeURL creates a URL used to acquire an authorization code.
+func (pca Client) CreateAuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string) (string, error) {
+ return pca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, pca.base.AuthParams)
+}
+
+// AcquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call.
+// These are set by using various AcquireTokenSilentOption functions.
+type AcquireTokenSilentOptions struct {
+ // Account represents the account to use. To set, use the WithSilentAccount() option.
+ Account Account
+}
+
+// AcquireTokenSilentOption changes options inside AcquireTokenSilentOptions used in .AcquireTokenSilent().
+type AcquireTokenSilentOption func(a *AcquireTokenSilentOptions)
+
+// WithSilentAccount uses the passed account during an AcquireTokenSilent() call.
+func WithSilentAccount(account Account) AcquireTokenSilentOption {
+ return func(a *AcquireTokenSilentOptions) {
+ a.Account = account
+ }
+}
+
+// AcquireTokenSilent acquires a token from either the cache or using a refresh token.
+func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, options ...AcquireTokenSilentOption) (AuthResult, error) {
+ opts := AcquireTokenSilentOptions{}
+ for _, o := range options {
+ o(&opts)
+ }
+
+ silentParameters := base.AcquireTokenSilentParameters{
+ Scopes: scopes,
+ Account: opts.Account,
+ RequestType: accesstokens.ATPublic,
+ IsAppCache: false,
+ }
+
+ return pca.base.AcquireTokenSilent(ctx, silentParameters)
+}
+
+// AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication.
+// NOTE: this flow is NOT recommended.
+func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string) (AuthResult, error) {
+ authParams := pca.base.AuthParams
+ authParams.Scopes = scopes
+ authParams.AuthorizationType = authority.ATUsernamePassword
+ authParams.Username = username
+ authParams.Password = password
+
+ token, err := pca.base.Token.UsernamePassword(ctx, authParams)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return pca.base.AuthResultFromToken(ctx, authParams, token, true)
+}
+
+type DeviceCodeResult = accesstokens.DeviceCodeResult
+
+// DeviceCode provides the results of the device code flows first stage (containing the code)
+// that must be entered on the second device and provides a method to retrieve the AuthenticationResult
+// once that code has been entered and verified.
+type DeviceCode struct {
+ // Result holds the information about the device code (such as the code).
+ Result DeviceCodeResult
+
+ authParams authority.AuthParams
+ client Client
+ dc oauth.DeviceCode
+}
+
+// AuthenticationResult retrieves the AuthenticationResult once the user enters the code
+// on the second device. Until then it blocks until the .AcquireTokenByDeviceCode() context
+// is cancelled or the token expires.
+func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error) {
+ token, err := d.dc.Token(ctx)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true)
+}
+
+// AcquireTokenByDeviceCode acquires a security token from the authority, by acquiring a device code and using that to acquire the token.
+// Users need to create an AcquireTokenDeviceCodeParameters instance and pass it in.
+func (pca Client) AcquireTokenByDeviceCode(ctx context.Context, scopes []string) (DeviceCode, error) {
+ authParams := pca.base.AuthParams
+ authParams.Scopes = scopes
+ authParams.AuthorizationType = authority.ATDeviceCode
+
+ dc, err := pca.base.Token.DeviceCode(ctx, authParams)
+ if err != nil {
+ return DeviceCode{}, err
+ }
+
+ return DeviceCode{Result: dc.Result, authParams: authParams, client: pca, dc: dc}, nil
+}
+
+// AcquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
+type AcquireTokenByAuthCodeOptions struct {
+ Challenge string
+}
+
+// AcquireTokenByAuthCodeOption changes options inside AcquireTokenByAuthCodeOptions used in .AcquireTokenByAuthCode().
+type AcquireTokenByAuthCodeOption func(a *AcquireTokenByAuthCodeOptions)
+
+// WithChallenge allows you to provide a code for the .AcquireTokenByAuthCode() call.
+func WithChallenge(challenge string) AcquireTokenByAuthCodeOption {
+ return func(a *AcquireTokenByAuthCodeOptions) {
+ a.Challenge = challenge
+ }
+}
+
+// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code.
+// The specified redirect URI must be the same URI that was used when the authorization code was requested.
+func (pca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...AcquireTokenByAuthCodeOption) (AuthResult, error) {
+ opts := AcquireTokenByAuthCodeOptions{}
+ for _, o := range options {
+ o(&opts)
+ }
+
+ params := base.AcquireTokenAuthCodeParameters{
+ Scopes: scopes,
+ Code: code,
+ Challenge: opts.Challenge,
+ AppType: accesstokens.ATPublic,
+ RedirectURI: redirectURI,
+ }
+
+ return pca.base.AcquireTokenByAuthCode(ctx, params)
+}
+
+// Accounts gets all the accounts in the token cache.
+// If there are no accounts in the cache the returned slice is empty.
+func (pca Client) Accounts() []Account {
+ return pca.base.AllAccounts()
+}
+
+// RemoveAccount signs the account out and forgets account from token cache.
+func (pca Client) RemoveAccount(account Account) error {
+ pca.base.RemoveAccount(account)
+ return nil
+}
+
+// InteractiveAuthOptions contains the optional parameters used to acquire an access token for interactive auth code flow.
+type InteractiveAuthOptions struct {
+ // Used to specify a custom port for the local server. http://localhost:portnumber
+ // All other URI components are ignored.
+ RedirectURI string
+}
+
+// InteractiveAuthOption changes options inside InteractiveAuthOptions used in .AcquireTokenInteractive().
+type InteractiveAuthOption func(*InteractiveAuthOptions)
+
+// WithRedirectURI uses the specified redirect URI for interactive auth.
+func WithRedirectURI(redirectURI string) InteractiveAuthOption {
+ return func(o *InteractiveAuthOptions) {
+ o.RedirectURI = redirectURI
+ }
+}
+
+// AcquireTokenInteractive acquires a security token from the authority using the default web browser to select the account.
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/msal-authentication-flows#interactive-and-non-interactive-authentication
+func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, options ...InteractiveAuthOption) (AuthResult, error) {
+ opts := InteractiveAuthOptions{}
+ for _, opt := range options {
+ opt(&opts)
+ }
+ // the code verifier is a random 32-byte sequence that's been base-64 encoded without padding.
+ // it's used to prevent MitM attacks during auth code flow, see https://tools.ietf.org/html/rfc7636
+ cv, challenge, err := codeVerifier()
+ if err != nil {
+ return AuthResult{}, err
+ }
+ var redirectURL *url.URL
+ if opts.RedirectURI != "" {
+ redirectURL, err = url.Parse(opts.RedirectURI)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ }
+	authParams := pca.base.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer.
+ authParams.Scopes = scopes
+ authParams.AuthorizationType = authority.ATInteractive
+ authParams.CodeChallenge = challenge
+ authParams.CodeChallengeMethod = "S256"
+ authParams.State = uuid.New().String()
+ authParams.Prompt = "select_account"
+ res, err := pca.browserLogin(ctx, redirectURL, authParams)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ authParams.Redirecturi = res.redirectURI
+
+ req, err := accesstokens.NewCodeChallengeRequest(authParams, accesstokens.ATPublic, nil, res.authCode, cv)
+ if err != nil {
+ return AuthResult{}, err
+ }
+
+ token, err := pca.base.Token.AuthCode(ctx, req)
+ if err != nil {
+ return AuthResult{}, err
+ }
+
+ return pca.base.AuthResultFromToken(ctx, authParams, token, true)
+}
+
+type interactiveAuthResult struct {
+ authCode string
+ redirectURI string
+}
+
+// provides a test hook to simulate opening a browser
+var browserOpenURL = func(authURL string) error {
+ return browser.OpenURL(authURL)
+}
+
+// parses the port number from the provided URL.
+// returns 0 if nil or no port is specified.
+func parsePort(u *url.URL) (int, error) {
+ if u == nil {
+ return 0, nil
+ }
+ p := u.Port()
+ if p == "" {
+ return 0, nil
+ }
+ return strconv.Atoi(p)
+}
+
+// browserLogin launches the system browser for interactive login
+func (pca Client) browserLogin(ctx context.Context, redirectURI *url.URL, params authority.AuthParams) (interactiveAuthResult, error) {
+ // start local redirect server so login can call us back
+ port, err := parsePort(redirectURI)
+ if err != nil {
+ return interactiveAuthResult{}, err
+ }
+ srv, err := local.New(params.State, port)
+ if err != nil {
+ return interactiveAuthResult{}, err
+ }
+ defer srv.Shutdown()
+ params.Scopes = accesstokens.AppendDefaultScopes(params)
+ authURL, err := pca.base.AuthCodeURL(ctx, params.ClientID, srv.Addr, params.Scopes, params)
+ if err != nil {
+ return interactiveAuthResult{}, err
+ }
+ // open browser window so user can select credentials
+ if err := browserOpenURL(authURL); err != nil {
+ return interactiveAuthResult{}, err
+ }
+ // now wait until the logic calls us back
+ res := srv.Result(ctx)
+ if res.Err != nil {
+ return interactiveAuthResult{}, res.Err
+ }
+ return interactiveAuthResult{
+ authCode: res.Code,
+ redirectURI: srv.Addr,
+ }, nil
+}
+
+// creates a code verifier string along with its SHA256 hash which
+// is used as the challenge when requesting an auth code.
+// used in interactive auth flow for PKCE.
+func codeVerifier() (codeVerifier string, challenge string, err error) {
+ cvBytes := make([]byte, 32)
+ if _, err = rand.Read(cvBytes); err != nil {
+ return
+ }
+ codeVerifier = base64.RawURLEncoding.EncodeToString(cvBytes)
+ // for PKCE, create a hash of the code verifier
+ cvh := sha256.Sum256([]byte(codeVerifier))
+ challenge = base64.RawURLEncoding.EncodeToString(cvh[:])
+ return
+}
diff --git a/vendor/github.com/jongio/azidext/go/azidext/LICENSE b/vendor/github.com/jongio/azidext/go/azidext/LICENSE
new file mode 100644
index 00000000000..21cbc94a68c
--- /dev/null
+++ b/vendor/github.com/jongio/azidext/go/azidext/LICENSE
@@ -0,0 +1,7 @@
+Copyright 2020 Jon Gallant
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/jongio/azidext/go/azidext/azure_identity_credential_adapter.go b/vendor/github.com/jongio/azidext/go/azidext/azure_identity_credential_adapter.go
new file mode 100644
index 00000000000..553b95a2715
--- /dev/null
+++ b/vendor/github.com/jongio/azidext/go/azidext/azure_identity_credential_adapter.go
@@ -0,0 +1,118 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidext
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+ "github.com/Azure/go-autorest/autorest"
+)
+
+// NewTokenCredentialAdapter is used to adapt an azcore.TokenCredential to an autorest.Authorizer
+func NewTokenCredentialAdapter(credential azcore.TokenCredential, scopes []string) autorest.Authorizer {
+ tkPolicy := runtime.NewBearerTokenPolicy(credential, scopes, nil)
+ return &policyAdapter{
+ pl: runtime.NewPipeline("azidext", "v0.4.0", runtime.PipelineOptions{
+ PerRetry: []policy.Policy{tkPolicy, nullPolicy{}},
+ }, nil),
+ }
+}
+
+type policyAdapter struct {
+ pl runtime.Pipeline
+}
+
+// WithAuthorization implements the autorest.Authorizer interface for type policyAdapter.
+func (ca *policyAdapter) WithAuthorization() autorest.PrepareDecorator {
+ return func(p autorest.Preparer) autorest.Preparer {
+ return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
+ r, err := p.Prepare(r)
+ if err != nil {
+ return r, err
+ }
+ // create a dummy request
+ req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String())
+ if err != nil {
+ return r, err
+ }
+ _, err = ca.pl.Do(req)
+ // if the authentication failed due to invalid/missing credentials
+ // return a wrapped error so the retry policy won't kick in.
+ type nonRetriable interface {
+ NonRetriable()
+ }
+ var nre nonRetriable
+ if errors.As(err, &nre) {
+ return r, &tokenRefreshError{
+ inner: err,
+ }
+ }
+ // some other error
+ if err != nil {
+ return r, err
+ }
+ // copy the authorization header to the real request
+ const authHeader = "Authorization"
+ r.Header.Set(authHeader, req.Raw().Header.Get(authHeader))
+ return r, err
+ })
+ }
+}
+
+// DefaultManagementScope is the default credential scope for Azure Resource Management.
+const DefaultManagementScope = "https://management.azure.com//.default"
+
+// DefaultAzureCredentialOptions contains credential and authentication policy options.
+type DefaultAzureCredentialOptions struct {
+ // DefaultCredential contains configuration options passed to azidentity.NewDefaultAzureCredential().
+ // Set this to nil to accept the underlying default behavior.
+ DefaultCredential *azidentity.DefaultAzureCredentialOptions
+
+ // Scopes contains the list of permission scopes required for the token.
+ // Setting this to nil will use the DefaultManagementScope when acquiring a token.
+ Scopes []string
+}
+
+// NewDefaultAzureCredentialAdapter adapts azidentity.NewDefaultAzureCredential to an autorest.Authorizer.
+func NewDefaultAzureCredentialAdapter(options *DefaultAzureCredentialOptions) (autorest.Authorizer, error) {
+ if options == nil {
+ options = &DefaultAzureCredentialOptions{
+ Scopes: []string{DefaultManagementScope},
+ }
+ }
+ cred, err := azidentity.NewDefaultAzureCredential(options.DefaultCredential)
+ if err != nil {
+ return nil, err
+ }
+ return NewTokenCredentialAdapter(cred, options.Scopes), nil
+}
+
+// dummy policy to terminate the pipeline
+type nullPolicy struct{}
+
+func (nullPolicy) Do(req *policy.Request) (*http.Response, error) {
+ return &http.Response{StatusCode: http.StatusOK}, nil
+}
+
+// error type returned to prevent the retry policy from retrying the request
+type tokenRefreshError struct {
+ inner error
+}
+
+func (t *tokenRefreshError) Error() string {
+ return t.inner.Error()
+}
+
+func (t *tokenRefreshError) Response() *http.Response {
+ return nil
+}
+
+func (t *tokenRefreshError) Unwrap() error {
+ return t.inner
+}
diff --git a/vendor/github.com/kylelemons/godebug/LICENSE b/vendor/github.com/kylelemons/godebug/LICENSE
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/vendor/github.com/kylelemons/godebug/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/kylelemons/godebug/diff/diff.go b/vendor/github.com/kylelemons/godebug/diff/diff.go
new file mode 100644
index 00000000000..200e596c625
--- /dev/null
+++ b/vendor/github.com/kylelemons/godebug/diff/diff.go
@@ -0,0 +1,186 @@
+// Copyright 2013 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package diff implements a linewise diff algorithm.
+package diff
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+// Chunk represents a piece of the diff. A chunk will not have both added and
+// deleted lines. Equal lines are always after any added or deleted lines.
+// A Chunk may or may not have any lines in it, especially for the first or last
+// chunk in a computation.
+type Chunk struct {
+ Added []string
+ Deleted []string
+ Equal []string
+}
+
+func (c *Chunk) empty() bool {
+ return len(c.Added) == 0 && len(c.Deleted) == 0 && len(c.Equal) == 0
+}
+
+// Diff returns a string containing a line-by-line unified diff of the linewise
+// changes required to make A into B. Each line is prefixed with '+', '-', or
+// ' ' to indicate if it should be added, removed, or is correct respectively.
+func Diff(A, B string) string {
+ aLines := strings.Split(A, "\n")
+ bLines := strings.Split(B, "\n")
+
+ chunks := DiffChunks(aLines, bLines)
+
+ buf := new(bytes.Buffer)
+ for _, c := range chunks {
+ for _, line := range c.Added {
+ fmt.Fprintf(buf, "+%s\n", line)
+ }
+ for _, line := range c.Deleted {
+ fmt.Fprintf(buf, "-%s\n", line)
+ }
+ for _, line := range c.Equal {
+ fmt.Fprintf(buf, " %s\n", line)
+ }
+ }
+ return strings.TrimRight(buf.String(), "\n")
+}
+
+// DiffChunks uses an O(D(N+M)) shortest-edit-script algorithm
+// to compute the edits required from A to B and returns the
+// edit chunks.
+func DiffChunks(a, b []string) []Chunk {
+ // algorithm: http://www.xmailserver.org/diff2.pdf
+
+ // We'll need these quantities a lot.
+ alen, blen := len(a), len(b) // M, N
+
+ // At most, it will require len(a) deletions and len(b) additions
+ // to transform a into b.
+ maxPath := alen + blen // MAX
+ if maxPath == 0 {
+ // degenerate case: two empty lists are the same
+ return nil
+ }
+
+ // Store the endpoint of the path for diagonals.
+ // We store only the a index, because the b index on any diagonal
+ // (which we know during the loop below) is aidx-diag.
+ // endpoint[maxPath] represents the 0 diagonal.
+ //
+ // Stated differently:
+ // endpoint[d] contains the aidx of a furthest reaching path in diagonal d
+ endpoint := make([]int, 2*maxPath+1) // V
+
+ saved := make([][]int, 0, 8) // Vs
+ save := func() {
+ dup := make([]int, len(endpoint))
+ copy(dup, endpoint)
+ saved = append(saved, dup)
+ }
+
+ var editDistance int // D
+dLoop:
+ for editDistance = 0; editDistance <= maxPath; editDistance++ {
+ // The 0 diag(onal) represents equality of a and b. Each diagonal to
+ // the left is numbered one lower, to the right is one higher, from
+ // -alen to +blen. Negative diagonals favor differences from a,
+ // positive diagonals favor differences from b. The edit distance to a
+ // diagonal d cannot be shorter than d itself.
+ //
+ // The iterations of this loop cover either odds or evens, but not both,
+ // If odd indices are inputs, even indices are outputs and vice versa.
+ for diag := -editDistance; diag <= editDistance; diag += 2 { // k
+ var aidx int // x
+ switch {
+ case diag == -editDistance:
+ // This is a new diagonal; copy from previous iter
+ aidx = endpoint[maxPath-editDistance+1] + 0
+ case diag == editDistance:
+ // This is a new diagonal; copy from previous iter
+ aidx = endpoint[maxPath+editDistance-1] + 1
+ case endpoint[maxPath+diag+1] > endpoint[maxPath+diag-1]:
+ // diagonal d+1 was farther along, so use that
+ aidx = endpoint[maxPath+diag+1] + 0
+ default:
+ // diagonal d-1 was farther (or the same), so use that
+ aidx = endpoint[maxPath+diag-1] + 1
+ }
+ // On diagonal d, we can compute bidx from aidx.
+ bidx := aidx - diag // y
+ // See how far we can go on this diagonal before we find a difference.
+ for aidx < alen && bidx < blen && a[aidx] == b[bidx] {
+ aidx++
+ bidx++
+ }
+ // Store the end of the current edit chain.
+ endpoint[maxPath+diag] = aidx
+ // If we've found the end of both inputs, we're done!
+ if aidx >= alen && bidx >= blen {
+ save() // save the final path
+ break dLoop
+ }
+ }
+ save() // save the current path
+ }
+ if editDistance == 0 {
+ return nil
+ }
+ chunks := make([]Chunk, editDistance+1)
+
+ x, y := alen, blen
+ for d := editDistance; d > 0; d-- {
+ endpoint := saved[d]
+ diag := x - y
+ insert := diag == -d || (diag != d && endpoint[maxPath+diag-1] < endpoint[maxPath+diag+1])
+
+ x1 := endpoint[maxPath+diag]
+ var x0, xM, kk int
+ if insert {
+ kk = diag + 1
+ x0 = endpoint[maxPath+kk]
+ xM = x0
+ } else {
+ kk = diag - 1
+ x0 = endpoint[maxPath+kk]
+ xM = x0 + 1
+ }
+ y0 := x0 - kk
+
+ var c Chunk
+ if insert {
+ c.Added = b[y0:][:1]
+ } else {
+ c.Deleted = a[x0:][:1]
+ }
+ if xM < x1 {
+ c.Equal = a[xM:][:x1-xM]
+ }
+
+ x, y = x0, y0
+ chunks[d] = c
+ }
+ if x > 0 {
+ chunks[0].Equal = a[:x]
+ }
+ if chunks[0].empty() {
+ chunks = chunks[1:]
+ }
+ if len(chunks) == 0 {
+ return nil
+ }
+ return chunks
+}
diff --git a/vendor/github.com/kylelemons/godebug/pretty/.gitignore b/vendor/github.com/kylelemons/godebug/pretty/.gitignore
new file mode 100644
index 00000000000..fa9a735da3c
--- /dev/null
+++ b/vendor/github.com/kylelemons/godebug/pretty/.gitignore
@@ -0,0 +1,5 @@
+*.test
+*.bench
+*.golden
+*.txt
+*.prof
diff --git a/vendor/github.com/kylelemons/godebug/pretty/doc.go b/vendor/github.com/kylelemons/godebug/pretty/doc.go
new file mode 100644
index 00000000000..03b5718a70d
--- /dev/null
+++ b/vendor/github.com/kylelemons/godebug/pretty/doc.go
@@ -0,0 +1,25 @@
+// Copyright 2013 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pretty pretty-prints Go structures.
+//
+// This package uses reflection to examine a Go value and can
+// print out in a nice, aligned fashion. It supports three
+// modes (normal, compact, and extended) for advanced use.
+//
+// See the Reflect and Print examples for what the output looks like.
+package pretty
+
+// TODO:
+// - Catch cycles
diff --git a/vendor/github.com/kylelemons/godebug/pretty/public.go b/vendor/github.com/kylelemons/godebug/pretty/public.go
new file mode 100644
index 00000000000..fbc5d7abbf8
--- /dev/null
+++ b/vendor/github.com/kylelemons/godebug/pretty/public.go
@@ -0,0 +1,188 @@
+// Copyright 2013 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pretty
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "reflect"
+ "time"
+
+ "github.com/kylelemons/godebug/diff"
+)
+
+// A Config represents optional configuration parameters for formatting.
+//
+// Some options, notably ShortList, dramatically increase the overhead
+// of pretty-printing a value.
+type Config struct {
+ // Verbosity options
+ Compact bool // One-line output. Overrides Diffable.
+ Diffable bool // Adds extra newlines for more easily diffable output.
+
+ // Field and value options
+ IncludeUnexported bool // Include unexported fields in output
+ PrintStringers bool // Call String on a fmt.Stringer
+ PrintTextMarshalers bool // Call MarshalText on an encoding.TextMarshaler
+ SkipZeroFields bool // Skip struct fields that have a zero value.
+
+ // Output transforms
+ ShortList int // Maximum character length for short lists if nonzero.
+
+ // Type-specific overrides
+ //
+ // Formatter maps a type to a function that will provide a one-line string
+ // representation of the input value. Conceptually:
+ // Formatter[reflect.TypeOf(v)](v) = "v as a string"
+ //
+ // Note that the first argument need not explicitly match the type, it must
+ // merely be callable with it.
+ //
+ // When processing an input value, if its type exists as a key in Formatter:
+ // 1) If the value is nil, no stringification is performed.
+ // This allows overriding of PrintStringers and PrintTextMarshalers.
+ // 2) The value will be called with the input as its only argument.
+ // The function must return a string as its first return value.
+ //
+ // In addition to func literals, two common values for this will be:
+ // fmt.Sprint (function) func Sprint(...interface{}) string
+ // Type.String (method) func (Type) String() string
+ //
+ // Note that neither of these work if the String method is a pointer
+ // method and the input will be provided as a value. In that case,
+ // use a function that calls .String on the formal value parameter.
+ Formatter map[reflect.Type]interface{}
+
+ // If TrackCycles is enabled, pretty will detect and track
+ // self-referential structures. If a self-referential structure (aka a
+ // "recursive" value) is detected, numbered placeholders will be emitted.
+ //
+ // Pointer tracking is disabled by default for performance reasons.
+ TrackCycles bool
+}
+
+// Default Config objects
+var (
+ // DefaultFormatter is the default set of overrides for stringification.
+ DefaultFormatter = map[reflect.Type]interface{}{
+ reflect.TypeOf(time.Time{}): fmt.Sprint,
+ reflect.TypeOf(net.IP{}): fmt.Sprint,
+ reflect.TypeOf((*error)(nil)).Elem(): fmt.Sprint,
+ }
+
+ // CompareConfig is the default configuration used for Compare.
+ CompareConfig = &Config{
+ Diffable: true,
+ IncludeUnexported: true,
+ Formatter: DefaultFormatter,
+ }
+
+ // DefaultConfig is the default configuration used for all other top-level functions.
+ DefaultConfig = &Config{
+ Formatter: DefaultFormatter,
+ }
+
+ // CycleTracker is a convenience config for formatting and comparing recursive structures.
+ CycleTracker = &Config{
+ Diffable: true,
+ Formatter: DefaultFormatter,
+ TrackCycles: true,
+ }
+)
+
+func (cfg *Config) fprint(buf *bytes.Buffer, vals ...interface{}) {
+ ref := &reflector{
+ Config: cfg,
+ }
+ if cfg.TrackCycles {
+ ref.pointerTracker = new(pointerTracker)
+ }
+ for i, val := range vals {
+ if i > 0 {
+ buf.WriteByte('\n')
+ }
+ newFormatter(cfg, buf).write(ref.val2node(reflect.ValueOf(val)))
+ }
+}
+
+// Print writes the DefaultConfig representation of the given values to standard output.
+func Print(vals ...interface{}) {
+ DefaultConfig.Print(vals...)
+}
+
+// Print writes the configured presentation of the given values to standard output.
+func (cfg *Config) Print(vals ...interface{}) {
+ fmt.Println(cfg.Sprint(vals...))
+}
+
+// Sprint returns a string representation of the given value according to the DefaultConfig.
+func Sprint(vals ...interface{}) string {
+ return DefaultConfig.Sprint(vals...)
+}
+
+// Sprint returns a string representation of the given value according to cfg.
+func (cfg *Config) Sprint(vals ...interface{}) string {
+ buf := new(bytes.Buffer)
+ cfg.fprint(buf, vals...)
+ return buf.String()
+}
+
+// Fprint writes the representation of the given value to the writer according to the DefaultConfig.
+func Fprint(w io.Writer, vals ...interface{}) (n int64, err error) {
+ return DefaultConfig.Fprint(w, vals...)
+}
+
+// Fprint writes the representation of the given value to the writer according to the cfg.
+func (cfg *Config) Fprint(w io.Writer, vals ...interface{}) (n int64, err error) {
+ buf := new(bytes.Buffer)
+ cfg.fprint(buf, vals...)
+ return buf.WriteTo(w)
+}
+
+// Compare returns a string containing a line-by-line unified diff of the
+// values in a and b, using the CompareConfig.
+//
+// Each line in the output is prefixed with '+', '-', or ' ' to indicate which
+// side it's from. Lines from the a side are marked with '-', lines from the
+// b side are marked with '+' and lines that are the same on both sides are
+// marked with ' '.
+//
+// The comparison is based on the intentionally-untyped output of Print, and as
+// such this comparison is pretty forviving. In particular, if the types of or
+// types within in a and b are different but have the same representation,
+// Compare will not indicate any differences between them.
+func Compare(a, b interface{}) string {
+ return CompareConfig.Compare(a, b)
+}
+
+// Compare returns a string containing a line-by-line unified diff of the
+// values in got and want according to the cfg.
+//
+// Each line in the output is prefixed with '+', '-', or ' ' to indicate which
+// side it's from. Lines from the a side are marked with '-', lines from the
+// b side are marked with '+' and lines that are the same on both sides are
+// marked with ' '.
+//
+// The comparison is based on the intentionally-untyped output of Print, and as
+// such this comparison is pretty forviving. In particular, if the types of or
+// types within in a and b are different but have the same representation,
+// Compare will not indicate any differences between them.
+func (cfg *Config) Compare(a, b interface{}) string {
+ diffCfg := *cfg
+ diffCfg.Diffable = true
+ return diff.Diff(cfg.Sprint(a), cfg.Sprint(b))
+}
diff --git a/vendor/github.com/kylelemons/godebug/pretty/reflect.go b/vendor/github.com/kylelemons/godebug/pretty/reflect.go
new file mode 100644
index 00000000000..5cd30b7f036
--- /dev/null
+++ b/vendor/github.com/kylelemons/godebug/pretty/reflect.go
@@ -0,0 +1,241 @@
+// Copyright 2013 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pretty
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+func isZeroVal(val reflect.Value) bool {
+ if !val.CanInterface() {
+ return false
+ }
+ z := reflect.Zero(val.Type()).Interface()
+ return reflect.DeepEqual(val.Interface(), z)
+}
+
+// pointerTracker is a helper for tracking pointer chasing to detect cycles.
+type pointerTracker struct {
+ addrs map[uintptr]int // addr[address] = seen count
+
+ lastID int
+ ids map[uintptr]int // ids[address] = id
+}
+
+// track tracks following a reference (pointer, slice, map, etc). Every call to
+// track should be paired with a call to untrack.
+func (p *pointerTracker) track(ptr uintptr) {
+ if p.addrs == nil {
+ p.addrs = make(map[uintptr]int)
+ }
+ p.addrs[ptr]++
+}
+
+// untrack registers that we have backtracked over the reference to the pointer.
+func (p *pointerTracker) untrack(ptr uintptr) {
+ p.addrs[ptr]--
+ if p.addrs[ptr] == 0 {
+ delete(p.addrs, ptr)
+ }
+}
+
+// seen returns whether the pointer was previously seen along this path.
+func (p *pointerTracker) seen(ptr uintptr) bool {
+ _, ok := p.addrs[ptr]
+ return ok
+}
+
+// keep allocates an ID for the given address and returns it.
+func (p *pointerTracker) keep(ptr uintptr) int {
+ if p.ids == nil {
+ p.ids = make(map[uintptr]int)
+ }
+ if _, ok := p.ids[ptr]; !ok {
+ p.lastID++
+ p.ids[ptr] = p.lastID
+ }
+ return p.ids[ptr]
+}
+
+// id returns the ID for the given address.
+func (p *pointerTracker) id(ptr uintptr) (int, bool) {
+ if p.ids == nil {
+ p.ids = make(map[uintptr]int)
+ }
+ id, ok := p.ids[ptr]
+ return id, ok
+}
+
+// reflector adds local state to the recursive reflection logic.
+type reflector struct {
+ *Config
+ *pointerTracker
+}
+
+// follow handles following a possiblly-recursive reference to the given value
+// from the given ptr address.
+func (r *reflector) follow(ptr uintptr, val reflect.Value) node {
+ if r.pointerTracker == nil {
+ // Tracking disabled
+ return r.val2node(val)
+ }
+
+ // If a parent already followed this, emit a reference marker
+ if r.seen(ptr) {
+ id := r.keep(ptr)
+ return ref{id}
+ }
+
+ // Track the pointer we're following while on this recursive branch
+ r.track(ptr)
+ defer r.untrack(ptr)
+ n := r.val2node(val)
+
+ // If the recursion used this ptr, wrap it with a target marker
+ if id, ok := r.id(ptr); ok {
+ return target{id, n}
+ }
+
+ // Otherwise, return the node unadulterated
+ return n
+}
+
+func (r *reflector) val2node(val reflect.Value) node {
+ if !val.IsValid() {
+ return rawVal("nil")
+ }
+
+ if val.CanInterface() {
+ v := val.Interface()
+ if formatter, ok := r.Formatter[val.Type()]; ok {
+ if formatter != nil {
+ res := reflect.ValueOf(formatter).Call([]reflect.Value{val})
+ return rawVal(res[0].Interface().(string))
+ }
+ } else {
+ if s, ok := v.(fmt.Stringer); ok && r.PrintStringers {
+ return stringVal(s.String())
+ }
+ if t, ok := v.(encoding.TextMarshaler); ok && r.PrintTextMarshalers {
+ if raw, err := t.MarshalText(); err == nil { // if NOT an error
+ return stringVal(string(raw))
+ }
+ }
+ }
+ }
+
+ switch kind := val.Kind(); kind {
+ case reflect.Ptr:
+ if val.IsNil() {
+ return rawVal("nil")
+ }
+ return r.follow(val.Pointer(), val.Elem())
+ case reflect.Interface:
+ if val.IsNil() {
+ return rawVal("nil")
+ }
+ return r.val2node(val.Elem())
+ case reflect.String:
+ return stringVal(val.String())
+ case reflect.Slice:
+ n := list{}
+ length := val.Len()
+ ptr := val.Pointer()
+ for i := 0; i < length; i++ {
+ n = append(n, r.follow(ptr, val.Index(i)))
+ }
+ return n
+ case reflect.Array:
+ n := list{}
+ length := val.Len()
+ for i := 0; i < length; i++ {
+ n = append(n, r.val2node(val.Index(i)))
+ }
+ return n
+ case reflect.Map:
+ // Extract the keys and sort them for stable iteration
+ keys := val.MapKeys()
+ pairs := make([]mapPair, 0, len(keys))
+ for _, key := range keys {
+ pairs = append(pairs, mapPair{
+ key: new(formatter).compactString(r.val2node(key)), // can't be cyclic
+ value: val.MapIndex(key),
+ })
+ }
+ sort.Sort(byKey(pairs))
+
+ // Process the keys into the final representation
+ ptr, n := val.Pointer(), keyvals{}
+ for _, pair := range pairs {
+ n = append(n, keyval{
+ key: pair.key,
+ val: r.follow(ptr, pair.value),
+ })
+ }
+ return n
+ case reflect.Struct:
+ n := keyvals{}
+ typ := val.Type()
+ fields := typ.NumField()
+ for i := 0; i < fields; i++ {
+ sf := typ.Field(i)
+ if !r.IncludeUnexported && sf.PkgPath != "" {
+ continue
+ }
+ field := val.Field(i)
+ if r.SkipZeroFields && isZeroVal(field) {
+ continue
+ }
+ n = append(n, keyval{sf.Name, r.val2node(field)})
+ }
+ return n
+ case reflect.Bool:
+ if val.Bool() {
+ return rawVal("true")
+ }
+ return rawVal("false")
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rawVal(fmt.Sprintf("%d", val.Int()))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return rawVal(fmt.Sprintf("%d", val.Uint()))
+ case reflect.Uintptr:
+ return rawVal(fmt.Sprintf("0x%X", val.Uint()))
+ case reflect.Float32, reflect.Float64:
+ return rawVal(fmt.Sprintf("%v", val.Float()))
+ case reflect.Complex64, reflect.Complex128:
+ return rawVal(fmt.Sprintf("%v", val.Complex()))
+ }
+
+ // Fall back to the default %#v if we can
+ if val.CanInterface() {
+ return rawVal(fmt.Sprintf("%#v", val.Interface()))
+ }
+
+ return rawVal(val.String())
+}
+
+type mapPair struct {
+ key string
+ value reflect.Value
+}
+
+type byKey []mapPair
+
+func (v byKey) Len() int { return len(v) }
+func (v byKey) Swap(i, j int) { v[i], v[j] = v[j], v[i] }
+func (v byKey) Less(i, j int) bool { return v[i].key < v[j].key }
diff --git a/vendor/github.com/kylelemons/godebug/pretty/structure.go b/vendor/github.com/kylelemons/godebug/pretty/structure.go
new file mode 100644
index 00000000000..d876f60cad2
--- /dev/null
+++ b/vendor/github.com/kylelemons/godebug/pretty/structure.go
@@ -0,0 +1,223 @@
+// Copyright 2013 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pretty
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// a formatter stores stateful formatting information as well as being
+// an io.Writer for simplicity.
+type formatter struct {
+ *bufio.Writer
+ *Config
+
+ // Self-referential structure tracking
+ tagNumbers map[int]int // tagNumbers[id] = <#n>
+}
+
+// newFormatter creates a new buffered formatter. For the output to be written
+// to the given writer, this must be accompanied by a call to write (or Flush).
+func newFormatter(cfg *Config, w io.Writer) *formatter {
+ return &formatter{
+ Writer: bufio.NewWriter(w),
+ Config: cfg,
+ tagNumbers: make(map[int]int),
+ }
+}
+
+func (f *formatter) write(n node) {
+ defer f.Flush()
+ n.format(f, "")
+}
+
+func (f *formatter) tagFor(id int) int {
+ if tag, ok := f.tagNumbers[id]; ok {
+ return tag
+ }
+ if f.tagNumbers == nil {
+ return 0
+ }
+ tag := len(f.tagNumbers) + 1
+ f.tagNumbers[id] = tag
+ return tag
+}
+
+type node interface {
+ format(f *formatter, indent string)
+}
+
+func (f *formatter) compactString(n node) string {
+ switch k := n.(type) {
+ case stringVal:
+ return string(k)
+ case rawVal:
+ return string(k)
+ }
+
+ buf := new(bytes.Buffer)
+ f2 := newFormatter(&Config{Compact: true}, buf)
+ f2.tagNumbers = f.tagNumbers // reuse tagNumbers just in case
+ f2.write(n)
+ return buf.String()
+}
+
+type stringVal string
+
+func (str stringVal) format(f *formatter, indent string) {
+ f.WriteString(strconv.Quote(string(str)))
+}
+
+type rawVal string
+
+func (r rawVal) format(f *formatter, indent string) {
+ f.WriteString(string(r))
+}
+
+type keyval struct {
+ key string
+ val node
+}
+
+type keyvals []keyval
+
+func (l keyvals) format(f *formatter, indent string) {
+ f.WriteByte('{')
+
+ switch {
+ case f.Compact:
+ // All on one line:
+ for i, kv := range l {
+ if i > 0 {
+ f.WriteByte(',')
+ }
+ f.WriteString(kv.key)
+ f.WriteByte(':')
+ kv.val.format(f, indent)
+ }
+ case f.Diffable:
+ f.WriteByte('\n')
+ inner := indent + " "
+ // Each value gets its own line:
+ for _, kv := range l {
+ f.WriteString(inner)
+ f.WriteString(kv.key)
+ f.WriteString(": ")
+ kv.val.format(f, inner)
+ f.WriteString(",\n")
+ }
+ f.WriteString(indent)
+ default:
+ keyWidth := 0
+ for _, kv := range l {
+ if kw := len(kv.key); kw > keyWidth {
+ keyWidth = kw
+ }
+ }
+ alignKey := indent + " "
+ alignValue := strings.Repeat(" ", keyWidth)
+ inner := alignKey + alignValue + " "
+ // First and last line shared with bracket:
+ for i, kv := range l {
+ if i > 0 {
+ f.WriteString(",\n")
+ f.WriteString(alignKey)
+ }
+ f.WriteString(kv.key)
+ f.WriteString(": ")
+ f.WriteString(alignValue[len(kv.key):])
+ kv.val.format(f, inner)
+ }
+ }
+
+ f.WriteByte('}')
+}
+
+type list []node
+
+func (l list) format(f *formatter, indent string) {
+ if max := f.ShortList; max > 0 {
+ short := f.compactString(l)
+ if len(short) <= max {
+ f.WriteString(short)
+ return
+ }
+ }
+
+ f.WriteByte('[')
+
+ switch {
+ case f.Compact:
+ // All on one line:
+ for i, v := range l {
+ if i > 0 {
+ f.WriteByte(',')
+ }
+ v.format(f, indent)
+ }
+ case f.Diffable:
+ f.WriteByte('\n')
+ inner := indent + " "
+ // Each value gets its own line:
+ for _, v := range l {
+ f.WriteString(inner)
+ v.format(f, inner)
+ f.WriteString(",\n")
+ }
+ f.WriteString(indent)
+ default:
+ inner := indent + " "
+ // First and last line shared with bracket:
+ for i, v := range l {
+ if i > 0 {
+ f.WriteString(",\n")
+ f.WriteString(inner)
+ }
+ v.format(f, inner)
+ }
+ }
+
+ f.WriteByte(']')
+}
+
+type ref struct {
+ id int
+}
+
+func (r ref) format(f *formatter, indent string) {
+	fmt.Fprintf(f, "<see #%d>", f.tagFor(r.id))
+}
+
+type target struct {
+ id int
+ value node
+}
+
+func (t target) format(f *formatter, indent string) {
+ tag := fmt.Sprintf("<#%d> ", f.tagFor(t.id))
+ switch {
+ case f.Diffable, f.Compact:
+ // no indent changes
+ default:
+ indent += strings.Repeat(" ", len(tag))
+ }
+ f.WriteString(tag)
+ t.value.format(f, indent)
+}
diff --git a/vendor/github.com/pkg/browser/LICENSE b/vendor/github.com/pkg/browser/LICENSE
new file mode 100644
index 00000000000..65f78fb6291
--- /dev/null
+++ b/vendor/github.com/pkg/browser/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2014, Dave Cheney
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/browser/README.md b/vendor/github.com/pkg/browser/README.md
new file mode 100644
index 00000000000..72b1976e303
--- /dev/null
+++ b/vendor/github.com/pkg/browser/README.md
@@ -0,0 +1,55 @@
+
+# browser
+ import "github.com/pkg/browser"
+
+Package browser provides helpers to open files, readers, and urls in a browser window.
+
+The choice of which browser is started is entirely client dependant.
+
+
+
+
+
+## Variables
+``` go
+var Stderr io.Writer = os.Stderr
+```
+Stderr is the io.Writer to which executed commands write standard error.
+
+``` go
+var Stdout io.Writer = os.Stdout
+```
+Stdout is the io.Writer to which executed commands write standard output.
+
+
+## func OpenFile
+``` go
+func OpenFile(path string) error
+```
+OpenFile opens new browser window for the file path.
+
+
+## func OpenReader
+``` go
+func OpenReader(r io.Reader) error
+```
+OpenReader consumes the contents of r and presents the
+results in a new browser window.
+
+
+## func OpenURL
+``` go
+func OpenURL(url string) error
+```
+OpenURL opens a new browser window pointing to url.
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
diff --git a/vendor/github.com/pkg/browser/browser.go b/vendor/github.com/pkg/browser/browser.go
new file mode 100644
index 00000000000..d7969d74d80
--- /dev/null
+++ b/vendor/github.com/pkg/browser/browser.go
@@ -0,0 +1,57 @@
+// Package browser provides helpers to open files, readers, and urls in a browser window.
+//
+// The choice of which browser is started is entirely client dependant.
+package browser
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+)
+
+// Stdout is the io.Writer to which executed commands write standard output.
+var Stdout io.Writer = os.Stdout
+
+// Stderr is the io.Writer to which executed commands write standard error.
+var Stderr io.Writer = os.Stderr
+
+// OpenFile opens new browser window for the file path.
+func OpenFile(path string) error {
+ path, err := filepath.Abs(path)
+ if err != nil {
+ return err
+ }
+ return OpenURL("file://" + path)
+}
+
+// OpenReader consumes the contents of r and presents the
+// results in a new browser window.
+func OpenReader(r io.Reader) error {
+ f, err := ioutil.TempFile("", "browser.*.html")
+ if err != nil {
+ return fmt.Errorf("browser: could not create temporary file: %v", err)
+ }
+ if _, err := io.Copy(f, r); err != nil {
+ f.Close()
+ return fmt.Errorf("browser: caching temporary file failed: %v", err)
+ }
+ if err := f.Close(); err != nil {
+ return fmt.Errorf("browser: caching temporary file failed: %v", err)
+ }
+ return OpenFile(f.Name())
+}
+
+// OpenURL opens a new browser window pointing to url.
+func OpenURL(url string) error {
+ return openBrowser(url)
+}
+
+func runCmd(prog string, args ...string) error {
+ cmd := exec.Command(prog, args...)
+ cmd.Stdout = Stdout
+ cmd.Stderr = Stderr
+ return cmd.Run()
+}
diff --git a/vendor/github.com/pkg/browser/browser_darwin.go b/vendor/github.com/pkg/browser/browser_darwin.go
new file mode 100644
index 00000000000..8507cf7c2b4
--- /dev/null
+++ b/vendor/github.com/pkg/browser/browser_darwin.go
@@ -0,0 +1,5 @@
+package browser
+
+func openBrowser(url string) error {
+ return runCmd("open", url)
+}
diff --git a/vendor/github.com/pkg/browser/browser_freebsd.go b/vendor/github.com/pkg/browser/browser_freebsd.go
new file mode 100644
index 00000000000..4fc7ff0761b
--- /dev/null
+++ b/vendor/github.com/pkg/browser/browser_freebsd.go
@@ -0,0 +1,14 @@
+package browser
+
+import (
+ "errors"
+ "os/exec"
+)
+
+func openBrowser(url string) error {
+ err := runCmd("xdg-open", url)
+ if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound {
+ return errors.New("xdg-open: command not found - install xdg-utils from ports(8)")
+ }
+ return err
+}
diff --git a/vendor/github.com/pkg/browser/browser_linux.go b/vendor/github.com/pkg/browser/browser_linux.go
new file mode 100644
index 00000000000..d26cdddf9c1
--- /dev/null
+++ b/vendor/github.com/pkg/browser/browser_linux.go
@@ -0,0 +1,21 @@
+package browser
+
+import (
+ "os/exec"
+ "strings"
+)
+
+func openBrowser(url string) error {
+ providers := []string{"xdg-open", "x-www-browser", "www-browser"}
+
+ // There are multiple possible providers to open a browser on linux
+ // One of them is xdg-open, another is x-www-browser, then there's www-browser, etc.
+ // Look for one that exists and run it
+ for _, provider := range providers {
+ if _, err := exec.LookPath(provider); err == nil {
+ return runCmd(provider, url)
+ }
+ }
+
+ return &exec.Error{Name: strings.Join(providers, ","), Err: exec.ErrNotFound}
+}
diff --git a/vendor/github.com/pkg/browser/browser_netbsd.go b/vendor/github.com/pkg/browser/browser_netbsd.go
new file mode 100644
index 00000000000..65a5e5a2934
--- /dev/null
+++ b/vendor/github.com/pkg/browser/browser_netbsd.go
@@ -0,0 +1,14 @@
+package browser
+
+import (
+ "errors"
+ "os/exec"
+)
+
+func openBrowser(url string) error {
+ err := runCmd("xdg-open", url)
+ if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound {
+ return errors.New("xdg-open: command not found - install xdg-utils from pkgsrc(7)")
+ }
+ return err
+}
diff --git a/vendor/github.com/pkg/browser/browser_openbsd.go b/vendor/github.com/pkg/browser/browser_openbsd.go
new file mode 100644
index 00000000000..4fc7ff0761b
--- /dev/null
+++ b/vendor/github.com/pkg/browser/browser_openbsd.go
@@ -0,0 +1,14 @@
+package browser
+
+import (
+ "errors"
+ "os/exec"
+)
+
+func openBrowser(url string) error {
+ err := runCmd("xdg-open", url)
+ if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound {
+ return errors.New("xdg-open: command not found - install xdg-utils from ports(8)")
+ }
+ return err
+}
diff --git a/vendor/github.com/pkg/browser/browser_unsupported.go b/vendor/github.com/pkg/browser/browser_unsupported.go
new file mode 100644
index 00000000000..7c5c17d34d2
--- /dev/null
+++ b/vendor/github.com/pkg/browser/browser_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux,!windows,!darwin,!openbsd,!freebsd,!netbsd
+
+package browser
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func openBrowser(url string) error {
+ return fmt.Errorf("openBrowser: unsupported operating system: %v", runtime.GOOS)
+}
diff --git a/vendor/github.com/pkg/browser/browser_windows.go b/vendor/github.com/pkg/browser/browser_windows.go
new file mode 100644
index 00000000000..63e192959a5
--- /dev/null
+++ b/vendor/github.com/pkg/browser/browser_windows.go
@@ -0,0 +1,7 @@
+package browser
+
+import "golang.org/x/sys/windows"
+
+func openBrowser(url string) error {
+ return windows.ShellExecute(0, nil, windows.StringToUTF16Ptr(url), nil, nil, windows.SW_SHOWNORMAL)
+}
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
index 0a54bdbcc65..2cb9c408f2e 100644
--- a/vendor/golang.org/x/net/context/go17.go
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -32,7 +32,7 @@ var DeadlineExceeded = context.DeadlineExceeded
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
ctx, f := context.WithCancel(parent)
- return ctx, CancelFunc(f)
+ return ctx, f
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
@@ -46,7 +46,7 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
ctx, f := context.WithDeadline(parent, deadline)
- return ctx, CancelFunc(f)
+ return ctx, f
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index aa3b0864ec5..43cc2a34ad0 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -143,7 +143,7 @@ type Server struct {
}
func (s *Server) initialConnRecvWindowSize() int32 {
- if s.MaxUploadBufferPerConnection > initialWindowSize {
+ if s.MaxUploadBufferPerConnection >= initialWindowSize {
return s.MaxUploadBufferPerConnection
}
return 1 << 20
@@ -869,9 +869,7 @@ func (sc *serverConn) serve() {
// Each connection starts with initialWindowSize inflow tokens.
// If a higher value is configured, we add more tokens.
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
- sc.sendWindowUpdate(nil, int(diff))
- }
+ sc.sendWindowUpdate(nil)
if err := sc.readPreface(); err != nil {
sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
@@ -1371,6 +1369,9 @@ func (sc *serverConn) startGracefulShutdownInternal() {
func (sc *serverConn) goAway(code ErrCode) {
sc.serveG.check()
if sc.inGoAway {
+ if sc.goAwayCode == ErrCodeNo {
+ sc.goAwayCode = code
+ }
return
}
sc.inGoAway = true
@@ -1585,7 +1586,7 @@ func (sc *serverConn) closeStream(st *stream, err error) {
if p := st.body; p != nil {
// Return any buffered unread bytes worth of conn-level flow control.
// See golang.org/issue/16481
- sc.sendWindowUpdate(nil, p.Len())
+ sc.sendWindowUpdate(nil)
p.CloseWithError(err)
}
@@ -1733,7 +1734,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
// sendWindowUpdate, which also schedules sending the
// frames.
sc.inflow.take(int32(f.Length))
- sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+ sc.sendWindowUpdate(nil) // conn-level
if st != nil && st.resetQueued {
// Already have a stream error in flight. Don't send another.
@@ -1751,7 +1752,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
}
sc.inflow.take(int32(f.Length))
- sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
+ sc.sendWindowUpdate(nil) // conn-level
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
@@ -1769,7 +1770,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
if len(data) > 0 {
wrote, err := st.body.Write(data)
if err != nil {
- sc.sendWindowUpdate(nil, int(f.Length)-wrote)
+ sc.sendWindowUpdate32(nil, int32(f.Length)-int32(wrote))
return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
}
if wrote != len(data) {
@@ -2096,12 +2097,6 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
}
- bodyOpen := !f.StreamEnded()
- if rp.method == "HEAD" && bodyOpen {
- // HEAD requests can't have bodies
- return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol))
- }
-
rp.header = make(http.Header)
for _, hf := range f.RegularFields() {
rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
@@ -2114,6 +2109,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
if err != nil {
return nil, nil, err
}
+ bodyOpen := !f.StreamEnded()
if bodyOpen {
if vv, ok := rp.header["Content-Length"]; ok {
if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
@@ -2326,17 +2322,32 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
func (sc *serverConn) noteBodyRead(st *stream, n int) {
sc.serveG.check()
- sc.sendWindowUpdate(nil, n) // conn-level
+ sc.sendWindowUpdate(nil) // conn-level
if st.state != stateHalfClosedRemote && st.state != stateClosed {
// Don't send this WINDOW_UPDATE if the stream is closed
// remotely.
- sc.sendWindowUpdate(st, n)
+ sc.sendWindowUpdate(st)
}
}
// st may be nil for conn-level
-func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
+func (sc *serverConn) sendWindowUpdate(st *stream) {
sc.serveG.check()
+
+ var n int32
+ if st == nil {
+ if avail, windowSize := sc.inflow.available(), sc.srv.initialConnRecvWindowSize(); avail > windowSize/2 {
+ return
+ } else {
+ n = windowSize - avail
+ }
+ } else {
+ if avail, windowSize := st.inflow.available(), sc.srv.initialStreamRecvWindowSize(); avail > windowSize/2 {
+ return
+ } else {
+ n = windowSize - avail
+ }
+ }
// "The legal range for the increment to the flow control
// window is 1 to 2^31-1 (2,147,483,647) octets."
// A Go Read call on 64-bit machines could in theory read
@@ -2502,6 +2513,10 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
rws.writeHeader(200)
}
+ if rws.handlerDone {
+ rws.promoteUndeclaredTrailers()
+ }
+
isHeadResp := rws.req.Method == "HEAD"
if !rws.sentHeader {
rws.sentHeader = true
@@ -2573,10 +2588,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
return 0, nil
}
- if rws.handlerDone {
- rws.promoteUndeclaredTrailers()
- }
-
// only send trailers if they have actually been defined by the
// server handler.
hasNonemptyTrailers := rws.hasNonemptyTrailers()
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 90fdc28cf97..c5d005bba7c 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -258,7 +258,8 @@ func (t *Transport) initConnPool() {
// HTTP/2 server.
type ClientConn struct {
t *Transport
- tconn net.Conn // usually *tls.Conn, except specialized impls
+ tconn net.Conn // usually *tls.Conn, except specialized impls
+ tconnClosed bool
tlsState *tls.ConnectionState // nil only for specialized impls
reused uint32 // whether conn is being reused; atomic
singleUse bool // whether being used for a single http.Request
@@ -344,8 +345,8 @@ type clientStream struct {
readErr error // sticky read error; owned by transportResponseBody.Read
reqBody io.ReadCloser
- reqBodyContentLength int64 // -1 means unknown
- reqBodyClosed bool // body has been closed; guarded by cc.mu
+ reqBodyContentLength int64 // -1 means unknown
+ reqBodyClosed chan struct{} // guarded by cc.mu; non-nil on Close, closed when done
// owned by writeRequest:
sentEndStream bool // sent an END_STREAM flag to the peer
@@ -385,9 +386,8 @@ func (cs *clientStream) abortStreamLocked(err error) {
cs.abortErr = err
close(cs.abort)
})
- if cs.reqBody != nil && !cs.reqBodyClosed {
- cs.reqBody.Close()
- cs.reqBodyClosed = true
+ if cs.reqBody != nil {
+ cs.closeReqBodyLocked()
}
// TODO(dneil): Clean up tests where cs.cc.cond is nil.
if cs.cc.cond != nil {
@@ -400,13 +400,24 @@ func (cs *clientStream) abortRequestBodyWrite() {
cc := cs.cc
cc.mu.Lock()
defer cc.mu.Unlock()
- if cs.reqBody != nil && !cs.reqBodyClosed {
- cs.reqBody.Close()
- cs.reqBodyClosed = true
+ if cs.reqBody != nil && cs.reqBodyClosed == nil {
+ cs.closeReqBodyLocked()
cc.cond.Broadcast()
}
}
+func (cs *clientStream) closeReqBodyLocked() {
+ if cs.reqBodyClosed != nil {
+ return
+ }
+ cs.reqBodyClosed = make(chan struct{})
+ reqBodyClosed := cs.reqBodyClosed
+ go func() {
+ cs.reqBody.Close()
+ close(reqBodyClosed)
+ }()
+}
+
type stickyErrWriter struct {
conn net.Conn
timeout time.Duration
@@ -921,10 +932,10 @@ func (cc *ClientConn) onIdleTimeout() {
cc.closeIfIdle()
}
-func (cc *ClientConn) closeConn() error {
+func (cc *ClientConn) closeConn() {
t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn)
defer t.Stop()
- return cc.tconn.Close()
+ cc.tconn.Close()
}
// A tls.Conn.Close can hang for a long time if the peer is unresponsive.
@@ -990,7 +1001,8 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
shutdownEnterWaitStateHook()
select {
case <-done:
- return cc.closeConn()
+ cc.closeConn()
+ return nil
case <-ctx.Done():
cc.mu.Lock()
// Free the goroutine above
@@ -1027,7 +1039,7 @@ func (cc *ClientConn) sendGoAway() error {
// closes the client connection immediately. In-flight requests are interrupted.
// err is sent to streams.
-func (cc *ClientConn) closeForError(err error) error {
+func (cc *ClientConn) closeForError(err error) {
cc.mu.Lock()
cc.closed = true
for _, cs := range cc.streams {
@@ -1035,7 +1047,7 @@ func (cc *ClientConn) closeForError(err error) error {
}
cc.cond.Broadcast()
cc.mu.Unlock()
- return cc.closeConn()
+ cc.closeConn()
}
// Close closes the client connection immediately.
@@ -1043,16 +1055,17 @@ func (cc *ClientConn) closeForError(err error) error {
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
func (cc *ClientConn) Close() error {
err := errors.New("http2: client connection force closed via ClientConn.Close")
- return cc.closeForError(err)
+ cc.closeForError(err)
+ return nil
}
// closes the client connection immediately. In-flight requests are interrupted.
-func (cc *ClientConn) closeForLostPing() error {
+func (cc *ClientConn) closeForLostPing() {
err := errors.New("http2: client connection lost")
if f := cc.t.CountError; f != nil {
f("conn_close_lost_ping")
}
- return cc.closeForError(err)
+ cc.closeForError(err)
}
// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
@@ -1430,11 +1443,19 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
// and in multiple cases: server replies <=299 and >299
// while still writing request body
cc.mu.Lock()
+ mustCloseBody := false
+ if cs.reqBody != nil && cs.reqBodyClosed == nil {
+ mustCloseBody = true
+ cs.reqBodyClosed = make(chan struct{})
+ }
bodyClosed := cs.reqBodyClosed
- cs.reqBodyClosed = true
cc.mu.Unlock()
- if !bodyClosed && cs.reqBody != nil {
+ if mustCloseBody {
cs.reqBody.Close()
+ close(bodyClosed)
+ }
+ if bodyClosed != nil {
+ <-bodyClosed
}
if err != nil && cs.sentEndStream {
@@ -1614,7 +1635,7 @@ func (cs *clientStream) writeRequestBody(req *http.Request) (err error) {
}
if err != nil {
cc.mu.Lock()
- bodyClosed := cs.reqBodyClosed
+ bodyClosed := cs.reqBodyClosed != nil
cc.mu.Unlock()
switch {
case bodyClosed:
@@ -1709,7 +1730,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
if cc.closed {
return 0, errClientConnClosed
}
- if cs.reqBodyClosed {
+ if cs.reqBodyClosed != nil {
return 0, errStopReqBodyWrite
}
select {
@@ -2005,7 +2026,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
// wake up RoundTrip if there is a pending request.
cc.cond.Broadcast()
- closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives()
+ closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 {
if VerboseLogs {
cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, cc.nextStreamID-2)
@@ -2081,6 +2102,7 @@ func (rl *clientConnReadLoop) cleanup() {
err = io.ErrUnexpectedEOF
}
cc.closed = true
+
for _, cs := range cc.streams {
select {
case <-cs.peerClosed:
@@ -2674,7 +2696,6 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
if fn := cc.t.CountError; fn != nil {
fn("recv_goaway_" + f.ErrCode.stringToken())
}
-
}
cc.setGoAway(f)
return nil
@@ -3028,7 +3049,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
cc.mu.Lock()
ci.WasIdle = len(cc.streams) == 0 && reused
if ci.WasIdle && !cc.lastActive.IsZero() {
- ci.IdleTime = time.Now().Sub(cc.lastActive)
+ ci.IdleTime = time.Since(cc.lastActive)
}
cc.mu.Unlock()
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
index 3ebf6f2daa3..eae2a99f54c 100644
--- a/vendor/golang.org/x/net/trace/trace.go
+++ b/vendor/golang.org/x/net/trace/trace.go
@@ -395,7 +395,7 @@ func New(family, title string) Trace {
}
func (tr *trace) Finish() {
- elapsed := time.Now().Sub(tr.Start)
+ elapsed := time.Since(tr.Start)
tr.mu.Lock()
tr.Elapsed = elapsed
tr.mu.Unlock()
diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
index bbaba18bc05..f3eb993bf24 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_arm64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_arm64.go
@@ -6,7 +6,10 @@ package cpu
import "runtime"
-const cacheLineSize = 64
+// cacheLineSize is used to prevent false sharing of cache lines.
+// We choose 128 because Apple Silicon, a.k.a. M1, has 128-byte cache line size.
+// It doesn't cost much and is much more future-proof.
+const cacheLineSize = 128
func initOptions() {
options = []option{
diff --git a/vendor/golang.org/x/sys/plan9/syscall.go b/vendor/golang.org/x/sys/plan9/syscall.go
index a25223b8fd4..67e5b0115c1 100644
--- a/vendor/golang.org/x/sys/plan9/syscall.go
+++ b/vendor/golang.org/x/sys/plan9/syscall.go
@@ -29,8 +29,6 @@ import (
"bytes"
"strings"
"unsafe"
-
- "golang.org/x/sys/internal/unsafeheader"
)
// ByteSliceFromString returns a NUL-terminated slice of bytes
@@ -82,13 +80,7 @@ func BytePtrToString(p *byte) string {
ptr = unsafe.Pointer(uintptr(ptr) + 1)
}
- var s []byte
- h := (*unsafeheader.Slice)(unsafe.Pointer(&s))
- h.Data = unsafe.Pointer(p)
- h.Len = n
- h.Cap = n
-
- return string(s)
+ return string(unsafe.Slice(p, n))
}
// Single-word zero for use when we need a valid pointer to 0 bytes.
diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go
index 884430b810c..0d12c0851ad 100644
--- a/vendor/golang.org/x/sys/unix/ioctl_linux.go
+++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go
@@ -4,9 +4,7 @@
package unix
-import (
- "unsafe"
-)
+import "unsafe"
// IoctlRetInt performs an ioctl operation specified by req on a device
// associated with opened file descriptor fd, and returns a non-negative
@@ -217,3 +215,19 @@ func IoctlKCMAttach(fd int, info KCMAttach) error {
func IoctlKCMUnattach(fd int, info KCMUnattach) error {
return ioctlPtr(fd, SIOCKCMUNATTACH, unsafe.Pointer(&info))
}
+
+// IoctlLoopGetStatus64 gets the status of the loop device associated with the
+// file descriptor fd using the LOOP_GET_STATUS64 operation.
+func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) {
+ var value LoopInfo64
+ if err := ioctlPtr(fd, LOOP_GET_STATUS64, unsafe.Pointer(&value)); err != nil {
+ return nil, err
+ }
+ return &value, nil
+}
+
+// IoctlLoopSetStatus64 sets the status of the loop device associated with the
+// file descriptor fd using the LOOP_SET_STATUS64 operation.
+func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error {
+ return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value))
+}
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
index 6fc18353df0..1b2b424a726 100644
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -156,10 +156,10 @@ openbsd_amd64)
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_arm)
+ mkasm="go run mkasm.go"
mkerrors="$mkerrors"
- mksyscall="go run mksyscall.go -l32 -openbsd -arm"
+ mksyscall="go run mksyscall.go -l32 -openbsd -arm -libc"
mksysctl="go run mksysctl_openbsd.go"
- mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
@@ -214,11 +214,6 @@ esac
if [ "$GOOSARCH" == "aix_ppc64" ]; then
# aix/ppc64 script generates files instead of writing to stdin.
echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ;
- elif [ "$GOOS" == "darwin" ]; then
- # 1.12 and later, syscalls via libSystem
- echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go";
- # 1.13 and later, syscalls via libSystem (including syscallPtr)
- echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go";
elif [ "$GOOS" == "illumos" ]; then
# illumos code generation requires a --illumos switch
echo "$mksyscall -illumos -tags illumos,$GOARCH syscall_illumos.go |gofmt > zsyscall_illumos_$GOARCH.go";
diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/golang.org/x/sys/unix/str.go
deleted file mode 100644
index 8ba89ed8694..00000000000
--- a/vendor/golang.org/x/sys/unix/str.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris
-// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
-
-package unix
-
-func itoa(val int) string { // do it here rather than with fmt to avoid dependency
- if val < 0 {
- return "-" + uitoa(uint(-val))
- }
- return uitoa(uint(val))
-}
-
-func uitoa(val uint) string {
- var buf [32]byte // big enough for int64
- i := len(buf) - 1
- for val >= 10 {
- buf[i] = byte(val%10 + '0')
- i--
- val /= 10
- }
- buf[i] = byte(val + '0')
- return string(buf[i:])
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go
index 649fa87405d..63e8c838317 100644
--- a/vendor/golang.org/x/sys/unix/syscall.go
+++ b/vendor/golang.org/x/sys/unix/syscall.go
@@ -29,8 +29,6 @@ import (
"bytes"
"strings"
"unsafe"
-
- "golang.org/x/sys/internal/unsafeheader"
)
// ByteSliceFromString returns a NUL-terminated slice of bytes
@@ -82,13 +80,7 @@ func BytePtrToString(p *byte) string {
ptr = unsafe.Pointer(uintptr(ptr) + 1)
}
- var s []byte
- h := (*unsafeheader.Slice)(unsafe.Pointer(&s))
- h.Data = unsafe.Pointer(p)
- h.Len = n
- h.Cap = n
-
- return string(s)
+ return string(unsafe.Slice(p, n))
}
// Single-word zero for use when we need a valid pointer to 0 bytes.
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go
deleted file mode 100644
index b0098607c70..00000000000
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin && go1.12 && !go1.13
-// +build darwin,go1.12,!go1.13
-
-package unix
-
-import (
- "unsafe"
-)
-
-const _SYS_GETDIRENTRIES64 = 344
-
-func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
- // To implement this using libSystem we'd need syscall_syscallPtr for
- // fdopendir. However, syscallPtr was only added in Go 1.13, so we fall
- // back to raw syscalls for this func on Go 1.12.
- var p unsafe.Pointer
- if len(buf) > 0 {
- p = unsafe.Pointer(&buf[0])
- } else {
- p = unsafe.Pointer(&_zero)
- }
- r0, _, e1 := Syscall6(_SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0)
- n = int(r0)
- if e1 != 0 {
- return n, errnoErr(e1)
- }
- return n, nil
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go
deleted file mode 100644
index 1596426b1e2..00000000000
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build darwin && go1.13
-// +build darwin,go1.13
-
-package unix
-
-import (
- "unsafe"
-
- "golang.org/x/sys/internal/unsafeheader"
-)
-
-//sys closedir(dir uintptr) (err error)
-//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno)
-
-func fdopendir(fd int) (dir uintptr, err error) {
- r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0)
- dir = uintptr(r0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-var libc_fdopendir_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib"
-
-func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
- // Simulate Getdirentries using fdopendir/readdir_r/closedir.
- // We store the number of entries to skip in the seek
- // offset of fd. See issue #31368.
- // It's not the full required semantics, but should handle the case
- // of calling Getdirentries or ReadDirent repeatedly.
- // It won't handle assigning the results of lseek to *basep, or handle
- // the directory being edited underfoot.
- skip, err := Seek(fd, 0, 1 /* SEEK_CUR */)
- if err != nil {
- return 0, err
- }
-
- // We need to duplicate the incoming file descriptor
- // because the caller expects to retain control of it, but
- // fdopendir expects to take control of its argument.
- // Just Dup'ing the file descriptor is not enough, as the
- // result shares underlying state. Use Openat to make a really
- // new file descriptor referring to the same directory.
- fd2, err := Openat(fd, ".", O_RDONLY, 0)
- if err != nil {
- return 0, err
- }
- d, err := fdopendir(fd2)
- if err != nil {
- Close(fd2)
- return 0, err
- }
- defer closedir(d)
-
- var cnt int64
- for {
- var entry Dirent
- var entryp *Dirent
- e := readdir_r(d, &entry, &entryp)
- if e != 0 {
- return n, errnoErr(e)
- }
- if entryp == nil {
- break
- }
- if skip > 0 {
- skip--
- cnt++
- continue
- }
-
- reclen := int(entry.Reclen)
- if reclen > len(buf) {
- // Not enough room. Return for now.
- // The counter will let us know where we should start up again.
- // Note: this strategy for suspending in the middle and
- // restarting is O(n^2) in the length of the directory. Oh well.
- break
- }
-
- // Copy entry into return buffer.
- var s []byte
- hdr := (*unsafeheader.Slice)(unsafe.Pointer(&s))
- hdr.Data = unsafe.Pointer(&entry)
- hdr.Cap = reclen
- hdr.Len = reclen
- copy(buf, s)
-
- buf = buf[reclen:]
- n += reclen
- cnt++
- }
- // Set the seek offset of the input fd to record
- // how many files we've already returned.
- _, err = Seek(fd, cnt, 0 /* SEEK_SET */)
- if err != nil {
- return n, err
- }
-
- return n, nil
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 4f87f16ea7c..1f63382182f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -19,6 +19,96 @@ import (
"unsafe"
)
+//sys closedir(dir uintptr) (err error)
+//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno)
+
+func fdopendir(fd int) (dir uintptr, err error) {
+ r0, _, e1 := syscall_syscallPtr(libc_fdopendir_trampoline_addr, uintptr(fd), 0, 0)
+ dir = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_fdopendir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib"
+
+func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
+ // Simulate Getdirentries using fdopendir/readdir_r/closedir.
+ // We store the number of entries to skip in the seek
+ // offset of fd. See issue #31368.
+ // It's not the full required semantics, but should handle the case
+ // of calling Getdirentries or ReadDirent repeatedly.
+ // It won't handle assigning the results of lseek to *basep, or handle
+ // the directory being edited underfoot.
+ skip, err := Seek(fd, 0, 1 /* SEEK_CUR */)
+ if err != nil {
+ return 0, err
+ }
+
+ // We need to duplicate the incoming file descriptor
+ // because the caller expects to retain control of it, but
+ // fdopendir expects to take control of its argument.
+ // Just Dup'ing the file descriptor is not enough, as the
+ // result shares underlying state. Use Openat to make a really
+ // new file descriptor referring to the same directory.
+ fd2, err := Openat(fd, ".", O_RDONLY, 0)
+ if err != nil {
+ return 0, err
+ }
+ d, err := fdopendir(fd2)
+ if err != nil {
+ Close(fd2)
+ return 0, err
+ }
+ defer closedir(d)
+
+ var cnt int64
+ for {
+ var entry Dirent
+ var entryp *Dirent
+ e := readdir_r(d, &entry, &entryp)
+ if e != 0 {
+ return n, errnoErr(e)
+ }
+ if entryp == nil {
+ break
+ }
+ if skip > 0 {
+ skip--
+ cnt++
+ continue
+ }
+
+ reclen := int(entry.Reclen)
+ if reclen > len(buf) {
+ // Not enough room. Return for now.
+ // The counter will let us know where we should start up again.
+ // Note: this strategy for suspending in the middle and
+ // restarting is O(n^2) in the length of the directory. Oh well.
+ break
+ }
+
+ // Copy entry into return buffer.
+ s := unsafe.Slice((*byte)(unsafe.Pointer(&entry)), reclen)
+ copy(buf, s)
+
+ buf = buf[reclen:]
+ n += reclen
+ cnt++
+ }
+ // Set the seek offset of the input fd to record
+ // how many files we've already returned.
+ _, err = Seek(fd, cnt, 0 /* SEEK_SET */)
+ if err != nil {
+ return n, err
+ }
+
+ return n, nil
+}
+
// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct {
Len uint8
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
index c3c4c698e07..b11ede89a96 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
@@ -61,7 +61,7 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
}
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
- ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)}
+ ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
index 82be61a2f98..9ed8eec6c28 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
@@ -61,7 +61,7 @@ func PtraceGetFsBase(pid int, fsbase *int64) (err error) {
}
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
- ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)}
+ ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
index cd58f1026c0..f8ac9824790 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
@@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
- ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)}
+ ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint32(countin)}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
index d6f538f9e00..8e932036ec3 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go
@@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
- ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)}
+ ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
index 8ea6e96100a..cbe12227896 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go
@@ -57,7 +57,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
- ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)}
+ ioDesc := PtraceIoDesc{Op: int32(req), Offs: uintptr(unsafe.Pointer(addr)), Addr: uintptr(unsafe.Pointer(&out[0])), Len: uint64(countin)}
err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index ecb0f27fb80..e044d5b546b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -13,6 +13,7 @@ package unix
import (
"encoding/binary"
+ "strconv"
"syscall"
"time"
"unsafe"
@@ -233,7 +234,7 @@ func Futimesat(dirfd int, path string, tv []Timeval) error {
func Futimes(fd int, tv []Timeval) (err error) {
// Believe it or not, this is the best we can do on Linux
// (and is what glibc does).
- return Utimes("/proc/self/fd/"+itoa(fd), tv)
+ return Utimes("/proc/self/fd/"+strconv.Itoa(fd), tv)
}
const ImplementsGetwd = true
@@ -1891,17 +1892,28 @@ func PrctlRetInt(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uint
return int(ret), nil
}
-// issue 1435.
-// On linux Setuid and Setgid only affects the current thread, not the process.
-// This does not match what most callers expect so we must return an error
-// here rather than letting the caller think that the call succeeded.
-
func Setuid(uid int) (err error) {
- return EOPNOTSUPP
+ return syscall.Setuid(uid)
+}
+
+func Setgid(gid int) (err error) {
+ return syscall.Setgid(gid)
+}
+
+func Setreuid(ruid, euid int) (err error) {
+ return syscall.Setreuid(ruid, euid)
+}
+
+func Setregid(rgid, egid int) (err error) {
+ return syscall.Setregid(rgid, egid)
}
-func Setgid(uid int) (err error) {
- return EOPNOTSUPP
+func Setresuid(ruid, euid, suid int) (err error) {
+ return syscall.Setresuid(ruid, euid, suid)
+}
+
+func Setresgid(rgid, egid, sgid int) (err error) {
+ return syscall.Setresgid(rgid, egid, sgid)
}
// SetfsgidRetGid sets fsgid for current thread and returns previous fsgid set.
@@ -2240,7 +2252,7 @@ func (fh *FileHandle) Bytes() []byte {
if n == 0 {
return nil
}
- return (*[1 << 30]byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type)) + 4))[:n:n]
+ return unsafe.Slice((*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&fh.fileHandle.Type))+4)), n)
}
// NameToHandleAt wraps the name_to_handle_at system call; it obtains
@@ -2356,6 +2368,16 @@ func Setitimer(which ItimerWhich, it Itimerval) (Itimerval, error) {
return prev, nil
}
+//sysnb rtSigprocmask(how int, set *Sigset_t, oldset *Sigset_t, sigsetsize uintptr) (err error) = SYS_RT_SIGPROCMASK
+
+func PthreadSigmask(how int, set, oldset *Sigset_t) error {
+ if oldset != nil {
+ // Explicitly clear in case Sigset_t is larger than _C__NSIG.
+ *oldset = Sigset_t{}
+ }
+ return rtSigprocmask(how, set, oldset, _C__NSIG/8)
+}
+
/*
* Unimplemented
*/
@@ -2414,7 +2436,6 @@ func Setitimer(which ItimerWhich, it Itimerval) (Itimerval, error) {
// RestartSyscall
// RtSigaction
// RtSigpending
-// RtSigprocmask
// RtSigqueueinfo
// RtSigreturn
// RtSigsuspend
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
index 518e476e6dd..ff5b5899d6d 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
@@ -41,10 +41,6 @@ func setTimeval(sec, usec int64) Timeval {
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
//sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32
//sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32
-//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32
-//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32
-//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
index f5e9d6bef10..9b270353298 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
@@ -46,11 +46,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
index c1a7778f105..856ad1d635c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
@@ -62,10 +62,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
//sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32
//sys setfsuid(uid int) (prev int, err error) = SYS_SETFSUID32
-//sysnb Setregid(rgid int, egid int) (err error) = SYS_SETREGID32
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error) = SYS_SETRESGID32
-//sysnb Setresuid(ruid int, euid int, suid int) (err error) = SYS_SETRESUID32
-//sysnb Setreuid(ruid int, euid int) (err error) = SYS_SETREUID32
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index d83e2c65716..6422704bc52 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -39,11 +39,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb setrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
index 0b69c3eff96..59dab510e97 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go
@@ -34,10 +34,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
index 98a2660b91f..bfef09a39eb 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
@@ -37,11 +37,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
//sys Statfs(path string, buf *Statfs_t) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
index b8a18c0ad22..ab302509663 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
@@ -32,10 +32,6 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go
index 4ed9e67c6df..eac1cf1acc8 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc.go
@@ -34,10 +34,6 @@ import (
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error)
//sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
index db63d384c5b..4df56616b8f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
@@ -34,11 +34,7 @@ package unix
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
//sys Stat(path string, stat *Stat_t) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index 925a748a39b..5f4243dea2c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -38,11 +38,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
index 6fcf277b0d7..d0a7d406685 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
@@ -34,11 +34,7 @@ import (
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
//sys Stat(path string, stat *Stat_t) (err error)
//sys Statfs(path string, buf *Statfs_t) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
index 02a45d9cc06..f5c793be26d 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
@@ -31,11 +31,7 @@ package unix
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
-//sysnb Setregid(rgid int, egid int) (err error)
-//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
-//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
-//sysnb Setreuid(ruid int, euid int) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
//sys Stat(path string, stat *Stat_t) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go
index e23c33de651..5930a8972b1 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build (openbsd && 386) || (openbsd && amd64) || (openbsd && arm64)
-// +build openbsd,386 openbsd,amd64 openbsd,arm64
+//go:build (openbsd && 386) || (openbsd && amd64) || (openbsd && arm) || (openbsd && arm64)
+// +build openbsd,386 openbsd,amd64 openbsd,arm openbsd,arm64
package unix
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index b5ec457cdcc..8c6f4092abe 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -750,8 +750,8 @@ type EventPort struct {
// we should handle things gracefully. To do so, we need to keep an extra
// reference to the cookie around until the event is processed
// thus the otherwise seemingly extraneous "cookies" map
- // The key of this map is a pointer to the corresponding &fCookie.cookie
- cookies map[*interface{}]*fileObjCookie
+ // The key of this map is a pointer to the corresponding fCookie
+ cookies map[*fileObjCookie]struct{}
}
// PortEvent is an abstraction of the port_event C struct.
@@ -778,7 +778,7 @@ func NewEventPort() (*EventPort, error) {
port: port,
fds: make(map[uintptr]*fileObjCookie),
paths: make(map[string]*fileObjCookie),
- cookies: make(map[*interface{}]*fileObjCookie),
+ cookies: make(map[*fileObjCookie]struct{}),
}
return e, nil
}
@@ -799,6 +799,7 @@ func (e *EventPort) Close() error {
}
e.fds = nil
e.paths = nil
+ e.cookies = nil
return nil
}
@@ -826,17 +827,16 @@ func (e *EventPort) AssociatePath(path string, stat os.FileInfo, events int, coo
if _, found := e.paths[path]; found {
return fmt.Errorf("%v is already associated with this Event Port", path)
}
- fobj, err := createFileObj(path, stat)
+ fCookie, err := createFileObjCookie(path, stat, cookie)
if err != nil {
return err
}
- fCookie := &fileObjCookie{fobj, cookie}
- _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fobj)), events, (*byte)(unsafe.Pointer(&fCookie.cookie)))
+ _, err = port_associate(e.port, PORT_SOURCE_FILE, uintptr(unsafe.Pointer(fCookie.fobj)), events, (*byte)(unsafe.Pointer(fCookie)))
if err != nil {
return err
}
e.paths[path] = fCookie
- e.cookies[&fCookie.cookie] = fCookie
+ e.cookies[fCookie] = struct{}{}
return nil
}
@@ -858,7 +858,7 @@ func (e *EventPort) DissociatePath(path string) error {
if err == nil {
// dissociate was successful, safe to delete the cookie
fCookie := e.paths[path]
- delete(e.cookies, &fCookie.cookie)
+ delete(e.cookies, fCookie)
}
delete(e.paths, path)
return err
@@ -871,13 +871,16 @@ func (e *EventPort) AssociateFd(fd uintptr, events int, cookie interface{}) erro
if _, found := e.fds[fd]; found {
return fmt.Errorf("%v is already associated with this Event Port", fd)
}
- fCookie := &fileObjCookie{nil, cookie}
- _, err := port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(&fCookie.cookie)))
+ fCookie, err := createFileObjCookie("", nil, cookie)
+ if err != nil {
+ return err
+ }
+ _, err = port_associate(e.port, PORT_SOURCE_FD, fd, events, (*byte)(unsafe.Pointer(fCookie)))
if err != nil {
return err
}
e.fds[fd] = fCookie
- e.cookies[&fCookie.cookie] = fCookie
+ e.cookies[fCookie] = struct{}{}
return nil
}
@@ -896,27 +899,31 @@ func (e *EventPort) DissociateFd(fd uintptr) error {
if err == nil {
// dissociate was successful, safe to delete the cookie
fCookie := e.fds[fd]
- delete(e.cookies, &fCookie.cookie)
+ delete(e.cookies, fCookie)
}
delete(e.fds, fd)
return err
}
-func createFileObj(name string, stat os.FileInfo) (*fileObj, error) {
- fobj := new(fileObj)
- bs, err := ByteSliceFromString(name)
- if err != nil {
- return nil, err
- }
- fobj.Name = (*int8)(unsafe.Pointer(&bs[0]))
- s := stat.Sys().(*syscall.Stat_t)
- fobj.Atim.Sec = s.Atim.Sec
- fobj.Atim.Nsec = s.Atim.Nsec
- fobj.Mtim.Sec = s.Mtim.Sec
- fobj.Mtim.Nsec = s.Mtim.Nsec
- fobj.Ctim.Sec = s.Ctim.Sec
- fobj.Ctim.Nsec = s.Ctim.Nsec
- return fobj, nil
+func createFileObjCookie(name string, stat os.FileInfo, cookie interface{}) (*fileObjCookie, error) {
+ fCookie := new(fileObjCookie)
+ fCookie.cookie = cookie
+ if name != "" && stat != nil {
+ fCookie.fobj = new(fileObj)
+ bs, err := ByteSliceFromString(name)
+ if err != nil {
+ return nil, err
+ }
+ fCookie.fobj.Name = (*int8)(unsafe.Pointer(&bs[0]))
+ s := stat.Sys().(*syscall.Stat_t)
+ fCookie.fobj.Atim.Sec = s.Atim.Sec
+ fCookie.fobj.Atim.Nsec = s.Atim.Nsec
+ fCookie.fobj.Mtim.Sec = s.Mtim.Sec
+ fCookie.fobj.Mtim.Nsec = s.Mtim.Nsec
+ fCookie.fobj.Ctim.Sec = s.Ctim.Sec
+ fCookie.fobj.Ctim.Nsec = s.Ctim.Nsec
+ }
+ return fCookie, nil
}
// GetOne wraps port_get(3c) and returns a single PortEvent.
@@ -929,44 +936,50 @@ func (e *EventPort) GetOne(t *Timespec) (*PortEvent, error) {
p := new(PortEvent)
e.mu.Lock()
defer e.mu.Unlock()
- e.peIntToExt(pe, p)
+ err = e.peIntToExt(pe, p)
+ if err != nil {
+ return nil, err
+ }
return p, nil
}
// peIntToExt converts a cgo portEvent struct into the friendlier PortEvent
// NOTE: Always call this function while holding the e.mu mutex
-func (e *EventPort) peIntToExt(peInt *portEvent, peExt *PortEvent) {
+func (e *EventPort) peIntToExt(peInt *portEvent, peExt *PortEvent) error {
+ if e.cookies == nil {
+ return fmt.Errorf("this EventPort is already closed")
+ }
peExt.Events = peInt.Events
peExt.Source = peInt.Source
- cookie := (*interface{})(unsafe.Pointer(peInt.User))
- peExt.Cookie = *cookie
+ fCookie := (*fileObjCookie)(unsafe.Pointer(peInt.User))
+ _, found := e.cookies[fCookie]
+
+ if !found {
+ panic("unexpected event port address; may be due to kernel bug; see https://go.dev/issue/54254")
+ }
+ peExt.Cookie = fCookie.cookie
+ delete(e.cookies, fCookie)
+
switch peInt.Source {
case PORT_SOURCE_FD:
- delete(e.cookies, cookie)
peExt.Fd = uintptr(peInt.Object)
// Only remove the fds entry if it exists and this cookie matches
if fobj, ok := e.fds[peExt.Fd]; ok {
- if &fobj.cookie == cookie {
+ if fobj == fCookie {
delete(e.fds, peExt.Fd)
}
}
case PORT_SOURCE_FILE:
- if fCookie, ok := e.cookies[cookie]; ok && uintptr(unsafe.Pointer(fCookie.fobj)) == uintptr(peInt.Object) {
- // Use our stashed reference rather than using unsafe on what we got back
- // the unsafe version would be (*fileObj)(unsafe.Pointer(uintptr(peInt.Object)))
- peExt.fobj = fCookie.fobj
- } else {
- panic("mismanaged memory")
- }
- delete(e.cookies, cookie)
+ peExt.fobj = fCookie.fobj
peExt.Path = BytePtrToString((*byte)(unsafe.Pointer(peExt.fobj.Name)))
// Only remove the paths entry if it exists and this cookie matches
if fobj, ok := e.paths[peExt.Path]; ok {
- if &fobj.cookie == cookie {
+ if fobj == fCookie {
delete(e.paths, peExt.Path)
}
}
}
+ return nil
}
// Pending wraps port_getn(3c) and returns how many events are pending.
@@ -990,7 +1003,7 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error)
got := uint32(min)
max := uint32(len(s))
var err error
- ps := make([]portEvent, max, max)
+ ps := make([]portEvent, max)
_, err = port_getn(e.port, &ps[0], max, &got, timeout)
// got will be trustworthy with ETIME, but not any other error.
if err != nil && err != ETIME {
@@ -998,8 +1011,18 @@ func (e *EventPort) Get(s []PortEvent, min int, timeout *Timespec) (int, error)
}
e.mu.Lock()
defer e.mu.Unlock()
+ valid := 0
for i := 0; i < int(got); i++ {
- e.peIntToExt(&ps[i], &s[i])
+ err2 := e.peIntToExt(&ps[i], &s[i])
+ if err2 != nil {
+ if valid == 0 && err == nil {
+ // If err2 is the only error and there are no valid events
+ // to return, return it to the caller.
+ err = err2
+ }
+ break
+ }
+ valid = i + 1
}
- return int(got), err
+ return valid, err
}
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
index 1ff5060b512..00bafda8654 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -13,8 +13,6 @@ import (
"sync"
"syscall"
"unsafe"
-
- "golang.org/x/sys/internal/unsafeheader"
)
var (
@@ -117,11 +115,7 @@ func (m *mmapper) Mmap(fd int, offset int64, length int, prot int, flags int) (d
}
// Use unsafe to convert addr into a []byte.
- var b []byte
- hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b))
- hdr.Data = unsafe.Pointer(addr)
- hdr.Cap = length
- hdr.Len = length
+ b := unsafe.Slice((*byte)(unsafe.Pointer(addr)), length)
// Register mapping in m and return it.
p := &b[cap(b)-1]
@@ -429,11 +423,15 @@ func Send(s int, buf []byte, flags int) (err error) {
}
func Sendto(fd int, p []byte, flags int, to Sockaddr) (err error) {
- ptr, n, err := to.sockaddr()
- if err != nil {
- return err
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return err
+ }
}
- return sendto(fd, p, flags, ptr, n)
+ return sendto(fd, p, flags, ptr, salen)
}
func SetsockoptByte(fd, level, opt int, value byte) (err error) {
diff --git a/vendor/golang.org/x/sys/unix/sysvshm_unix.go b/vendor/golang.org/x/sys/unix/sysvshm_unix.go
index 0bb4c8de557..5bb41d17bc4 100644
--- a/vendor/golang.org/x/sys/unix/sysvshm_unix.go
+++ b/vendor/golang.org/x/sys/unix/sysvshm_unix.go
@@ -7,11 +7,7 @@
package unix
-import (
- "unsafe"
-
- "golang.org/x/sys/internal/unsafeheader"
-)
+import "unsafe"
// SysvShmAttach attaches the Sysv shared memory segment associated with the
// shared memory identifier id.
@@ -34,12 +30,7 @@ func SysvShmAttach(id int, addr uintptr, flag int) ([]byte, error) {
}
// Use unsafe to convert addr into a []byte.
- // TODO: convert to unsafe.Slice once we can assume Go 1.17
- var b []byte
- hdr := (*unsafeheader.Slice)(unsafe.Pointer(&b))
- hdr.Data = unsafe.Pointer(addr)
- hdr.Cap = int(info.Segsz)
- hdr.Len = int(info.Segsz)
+ b := unsafe.Slice((*byte)(unsafe.Pointer(addr)), int(info.Segsz))
return b, nil
}
diff --git a/vendor/golang.org/x/sys/unix/xattr_bsd.go b/vendor/golang.org/x/sys/unix/xattr_bsd.go
index 25df1e37801..663b3779de2 100644
--- a/vendor/golang.org/x/sys/unix/xattr_bsd.go
+++ b/vendor/golang.org/x/sys/unix/xattr_bsd.go
@@ -160,13 +160,12 @@ func Lremovexattr(link string, attr string) (err error) {
}
func Listxattr(file string, dest []byte) (sz int, err error) {
- d := initxattrdest(dest, 0)
destsiz := len(dest)
// FreeBSD won't allow you to list xattrs from multiple namespaces
- s := 0
+ s, pos := 0, 0
for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} {
- stmp, e := ExtattrListFile(file, nsid, uintptr(d), destsiz)
+ stmp, e := ListxattrNS(file, nsid, dest[pos:])
/* Errors accessing system attrs are ignored so that
* we can implement the Linux-like behavior of omitting errors that
@@ -175,66 +174,102 @@ func Listxattr(file string, dest []byte) (sz int, err error) {
* Linux will still error if we ask for user attributes on a file that
* we don't have read permissions on, so don't ignore those errors
*/
- if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
- continue
- } else if e != nil {
+ if e != nil {
+ if e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
+ continue
+ }
return s, e
}
s += stmp
- destsiz -= s
- if destsiz < 0 {
- destsiz = 0
+ pos = s
+ if pos > destsiz {
+ pos = destsiz
}
- d = initxattrdest(dest, s)
}
return s, nil
}
-func Flistxattr(fd int, dest []byte) (sz int, err error) {
+func ListxattrNS(file string, nsid int, dest []byte) (sz int, err error) {
d := initxattrdest(dest, 0)
destsiz := len(dest)
- s := 0
+ s, e := ExtattrListFile(file, nsid, uintptr(d), destsiz)
+ if e != nil {
+ return 0, err
+ }
+
+ return s, nil
+}
+
+func Flistxattr(fd int, dest []byte) (sz int, err error) {
+ destsiz := len(dest)
+
+ s, pos := 0, 0
for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} {
- stmp, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz)
- if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
- continue
- } else if e != nil {
+ stmp, e := FlistxattrNS(fd, nsid, dest[pos:])
+
+ if e != nil {
+ if e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
+ continue
+ }
return s, e
}
s += stmp
- destsiz -= s
- if destsiz < 0 {
- destsiz = 0
+ pos = s
+ if pos > destsiz {
+ pos = destsiz
}
- d = initxattrdest(dest, s)
}
return s, nil
}
-func Llistxattr(link string, dest []byte) (sz int, err error) {
+func FlistxattrNS(fd int, nsid int, dest []byte) (sz int, err error) {
d := initxattrdest(dest, 0)
destsiz := len(dest)
- s := 0
+ s, e := ExtattrListFd(fd, nsid, uintptr(d), destsiz)
+ if e != nil {
+ return 0, err
+ }
+
+ return s, nil
+}
+
+func Llistxattr(link string, dest []byte) (sz int, err error) {
+ destsiz := len(dest)
+
+ s, pos := 0, 0
for _, nsid := range [...]int{EXTATTR_NAMESPACE_USER, EXTATTR_NAMESPACE_SYSTEM} {
- stmp, e := ExtattrListLink(link, nsid, uintptr(d), destsiz)
- if e != nil && e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
- continue
- } else if e != nil {
+ stmp, e := LlistxattrNS(link, nsid, dest[pos:])
+
+ if e != nil {
+ if e == EPERM && nsid != EXTATTR_NAMESPACE_USER {
+ continue
+ }
return s, e
}
s += stmp
- destsiz -= s
- if destsiz < 0 {
- destsiz = 0
+ pos = s
+ if pos > destsiz {
+ pos = destsiz
}
- d = initxattrdest(dest, s)
+ }
+
+ return s, nil
+}
+
+func LlistxattrNS(link string, nsid int, dest []byte) (sz int, err error) {
+ d := initxattrdest(dest, 0)
+ destsiz := len(dest)
+
+ s, e := ExtattrListLink(link, nsid, uintptr(d), destsiz)
+ if e != nil {
+ return 0, err
}
return s, nil
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go
deleted file mode 100644
index a06eb093242..00000000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-//go:build darwin && amd64 && go1.13
-// +build darwin,amd64,go1.13
-
-package unix
-
-import (
- "syscall"
- "unsafe"
-)
-
-var _ syscall.Errno
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func closedir(dir uintptr) (err error) {
- _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-var libc_closedir_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
- r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
- res = Errno(r0)
- return
-}
-
-var libc_readdir_r_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
deleted file mode 100644
index f5bb40eda9e..00000000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s
+++ /dev/null
@@ -1,25 +0,0 @@
-// go run mkasm.go darwin amd64
-// Code generated by the command above; DO NOT EDIT.
-
-//go:build go1.13
-// +build go1.13
-
-#include "textflag.h"
-
-TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_fdopendir(SB)
-
-GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8
-DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
-
-TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_closedir(SB)
-
-GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8
-DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
-
-TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_readdir_r(SB)
-
-GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8
-DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 467deed7633..c2461c49679 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -1,8 +1,8 @@
-// go run mksyscall.go -tags darwin,amd64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go
+// go run mksyscall.go -tags darwin,amd64 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
-//go:build darwin && amd64 && go1.12
-// +build darwin,amd64,go1.12
+//go:build darwin && amd64
+// +build darwin,amd64
package unix
@@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func closedir(dir uintptr) (err error) {
+ _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_closedir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
+ r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
+ res = Errno(r0)
+ return
+}
+
+var libc_readdir_r_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func pipe(p *[2]int32) (err error) {
_, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index b41467a0e50..95fe4c0eb96 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -1,11 +1,14 @@
// go run mkasm.go darwin amd64
// Code generated by the command above; DO NOT EDIT.
-//go:build go1.12
-// +build go1.12
-
#include "textflag.h"
+TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fdopendir(SB)
+
+GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
+
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
@@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
+TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_closedir(SB)
+
+GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8
+DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
+
+TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readdir_r(SB)
+
+GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
+
TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe(SB)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go
deleted file mode 100644
index cec595d553a..00000000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go
-// Code generated by the command above; see README.md. DO NOT EDIT.
-
-//go:build darwin && arm64 && go1.13
-// +build darwin,arm64,go1.13
-
-package unix
-
-import (
- "syscall"
- "unsafe"
-)
-
-var _ syscall.Errno
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func closedir(dir uintptr) (err error) {
- _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-var libc_closedir_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
- r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
- res = Errno(r0)
- return
-}
-
-var libc_readdir_r_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s
deleted file mode 100644
index 0c3f76bc203..00000000000
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s
+++ /dev/null
@@ -1,25 +0,0 @@
-// go run mkasm.go darwin arm64
-// Code generated by the command above; DO NOT EDIT.
-
-//go:build go1.13
-// +build go1.13
-
-#include "textflag.h"
-
-TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_fdopendir(SB)
-
-GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8
-DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
-
-TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_closedir(SB)
-
-GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8
-DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
-
-TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
- JMP libc_readdir_r(SB)
-
-GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8
-DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 35938d34ff8..26a0fdc505b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -1,8 +1,8 @@
-// go run mksyscall.go -tags darwin,arm64,go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go
+// go run mksyscall.go -tags darwin,arm64 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go
// Code generated by the command above; see README.md. DO NOT EDIT.
-//go:build darwin && arm64 && go1.12
-// +build darwin,arm64,go1.12
+//go:build darwin && arm64
+// +build darwin,arm64
package unix
@@ -463,6 +463,32 @@ var libc_munlockall_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func closedir(dir uintptr) (err error) {
+ _, _, e1 := syscall_syscall(libc_closedir_trampoline_addr, uintptr(dir), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_closedir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
+ r0, _, _ := syscall_syscall(libc_readdir_r_trampoline_addr, uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result)))
+ res = Errno(r0)
+ return
+}
+
+var libc_readdir_r_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func pipe(p *[2]int32) (err error) {
_, _, e1 := syscall_rawSyscall(libc_pipe_trampoline_addr, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index e1f9204a208..efa5b4c987c 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -1,11 +1,14 @@
// go run mkasm.go darwin arm64
// Code generated by the command above; DO NOT EDIT.
-//go:build go1.12
-// +build go1.12
-
#include "textflag.h"
+TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fdopendir(SB)
+
+GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8
+DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
+
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
@@ -174,6 +177,18 @@ TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
+TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_closedir(SB)
+
+GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8
+DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB)
+
+TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readdir_r(SB)
+
+GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB)
+
TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe(SB)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index bc4a2753114..293cf36804e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -2151,3 +2151,13 @@ func setitimer(which int, newValue *Itimerval, oldValue *Itimerval) (err error)
}
return
}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func rtSigprocmask(how int, set *Sigset_t, oldset *Sigset_t, sigsetsize uintptr) (err error) {
+ _, _, e1 := RawSyscall6(SYS_RT_SIGPROCMASK, uintptr(how), uintptr(unsafe.Pointer(set)), uintptr(unsafe.Pointer(oldset)), uintptr(sigsetsize), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
index 88af526b7e2..c81b0ad4777 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
@@ -287,46 +287,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) {
r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
n = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
index 2a0c4aa6a63..2206bce7f4d 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
@@ -334,36 +334,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Setrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
@@ -374,16 +344,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
index 4882bde3af0..edf6b39f161 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
@@ -412,46 +412,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID32, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID32, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID32, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID32, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
index 9f8c24e4343..190609f2140 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
@@ -289,36 +289,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func setrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
@@ -329,16 +299,6 @@ func setrlimit(resource int, rlim *Rlimit) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
index 523f2ba03e4..806ffd1e125 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_loong64.go
@@ -223,46 +223,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
index d7d6f42441b..5f984cbb1ca 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
@@ -248,46 +248,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
index 7f1f8e65339..46fc380a40e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
@@ -278,36 +278,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Setrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
@@ -318,16 +288,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
index f933d0f51a1..cbd0d4dadba 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
@@ -278,36 +278,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Setrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
@@ -318,16 +288,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
index 297d0a99822..0c13d15f07c 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
@@ -248,46 +248,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go
index 2e32e7a449f..e01432aed51 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc.go
@@ -308,46 +308,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index 3c531704647..13c7ee7baff 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -349,36 +349,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Setrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
@@ -389,16 +359,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index a00c6744ecb..02d0c0fd61e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -349,36 +349,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Setrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
@@ -389,16 +359,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
index 1239cc2de9c..9fee3b1d239 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go
@@ -269,36 +269,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Setrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
@@ -309,16 +279,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
index e0dabc60278..647bbfecd6a 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
@@ -319,36 +319,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Setrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
@@ -359,16 +329,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
n = int64(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
index 368623c0f2e..ada057f8914 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go
@@ -329,36 +329,6 @@ func setfsuid(uid int) (prev int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Setrlimit(resource int, rlim *Rlimit) (err error) {
_, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
if e1 != 0 {
@@ -369,16 +339,6 @@ func Setrlimit(resource int, rlim *Rlimit) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
- if e1 != 0 {
- err = errnoErr(e1)
- }
- return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
func Shutdown(fd int, how int) (err error) {
_, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
index 69f80300674..8da6791d1e3 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
@@ -1,4 +1,4 @@
-// go run mksyscall.go -l32 -openbsd -arm -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go
+// go run mksyscall.go -l32 -openbsd -arm -libc -tags openbsd,arm syscall_bsd.go syscall_openbsd.go syscall_openbsd_arm.go
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build openbsd && arm
@@ -16,7 +16,7 @@ var _ syscall.Errno
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+ r0, _, e1 := syscall_rawSyscall(libc_getgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -24,20 +24,28 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
return
}
+var libc_getgroups_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getgroups getgroups "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setgroups(ngid int, gid *_Gid_t) (err error) {
- _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
+ _, _, e1 := syscall_rawSyscall(libc_setgroups_trampoline_addr, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setgroups_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setgroups setgroups "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
- r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+ r0, _, e1 := syscall_syscall6(libc_wait4_trampoline_addr, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
wpid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -45,10 +53,14 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err
return
}
+var libc_wait4_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_wait4 wait4 "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
- r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ r0, _, e1 := syscall_syscall(libc_accept_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -56,30 +68,42 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
return
}
+var libc_accept_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_accept accept "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen))
+ _, _, e1 := syscall_syscall(libc_bind_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_bind_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_bind bind "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
- _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen))
+ _, _, e1 := syscall_syscall(libc_connect_trampoline_addr, uintptr(s), uintptr(addr), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_connect_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_connect connect "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socket(domain int, typ int, proto int) (fd int, err error) {
- r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto))
+ r0, _, e1 := syscall_rawSyscall(libc_socket_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -87,66 +111,94 @@ func socket(domain int, typ int, proto int) (fd int, err error) {
return
}
+var libc_socket_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_socket socket "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
- _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ _, _, e1 := syscall_syscall6(libc_getsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_getsockopt_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getsockopt getsockopt "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
- _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
+ _, _, e1 := syscall_syscall6(libc_setsockopt_trampoline_addr, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setsockopt_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setsockopt setsockopt "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ _, _, e1 := syscall_rawSyscall(libc_getpeername_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_getpeername_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getpeername getpeername "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
- _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ _, _, e1 := syscall_rawSyscall(libc_getsockname_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_getsockname_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getsockname getsockname "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Shutdown(s int, how int) (err error) {
- _, _, e1 := Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0)
+ _, _, e1 := syscall_syscall(libc_shutdown_trampoline_addr, uintptr(s), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_shutdown_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_shutdown shutdown "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) {
- _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
+ _, _, e1 := syscall_rawSyscall6(libc_socketpair_trampoline_addr, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_socketpair_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_socketpair socketpair "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
@@ -156,7 +208,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ r0, _, e1 := syscall_syscall6(libc_recvfrom_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -164,6 +216,10 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
return
}
+var libc_recvfrom_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_recvfrom recvfrom "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
@@ -173,17 +229,21 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ _, _, e1 := syscall_syscall6(libc_sendto_trampoline_addr, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_sendto_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_sendto sendto "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ r0, _, e1 := syscall_syscall(libc_recvmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -191,10 +251,14 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
return
}
+var libc_recvmsg_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_recvmsg recvmsg "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
+ r0, _, e1 := syscall_syscall(libc_sendmsg_trampoline_addr, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -202,10 +266,14 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
return
}
+var libc_sendmsg_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_sendmsg sendmsg "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
+ r0, _, e1 := syscall_syscall6(libc_kevent_trampoline_addr, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -213,6 +281,10 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne
return
}
+var libc_kevent_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_kevent kevent "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func utimes(path string, timeval *[2]Timeval) (err error) {
@@ -221,27 +293,35 @@ func utimes(path string, timeval *[2]Timeval) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
+ _, _, e1 := syscall_syscall(libc_utimes_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_utimes_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_utimes utimes "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func futimes(fd int, timeval *[2]Timeval) (err error) {
- _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
+ _, _, e1 := syscall_syscall(libc_futimes_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_futimes_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_futimes futimes "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+ r0, _, e1 := syscall_syscall(libc_poll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -249,6 +329,10 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
return
}
+var libc_poll_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_poll poll "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Madvise(b []byte, behav int) (err error) {
@@ -258,13 +342,17 @@ func Madvise(b []byte, behav int) (err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(behav))
+ _, _, e1 := syscall_syscall(libc_madvise_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(behav))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_madvise_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_madvise madvise "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlock(b []byte) (err error) {
@@ -274,23 +362,31 @@ func Mlock(b []byte) (err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ _, _, e1 := syscall_syscall(libc_mlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mlock_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mlock mlock "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mlockall(flags int) (err error) {
- _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
+ _, _, e1 := syscall_syscall(libc_mlockall_trampoline_addr, uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mlockall_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mlockall mlockall "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mprotect(b []byte, prot int) (err error) {
@@ -300,13 +396,17 @@ func Mprotect(b []byte, prot int) (err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
+ _, _, e1 := syscall_syscall(libc_mprotect_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(prot))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mprotect_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mprotect mprotect "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Msync(b []byte, flags int) (err error) {
@@ -316,13 +416,17 @@ func Msync(b []byte, flags int) (err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall(SYS_MSYNC, uintptr(_p0), uintptr(len(b)), uintptr(flags))
+ _, _, e1 := syscall_syscall(libc_msync_trampoline_addr, uintptr(_p0), uintptr(len(b)), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_msync_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_msync msync "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlock(b []byte) (err error) {
@@ -332,33 +436,45 @@ func Munlock(b []byte) (err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ _, _, e1 := syscall_syscall(libc_munlock_trampoline_addr, uintptr(_p0), uintptr(len(b)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_munlock_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_munlock munlock "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Munlockall() (err error) {
- _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
+ _, _, e1 := syscall_syscall(libc_munlockall_trampoline_addr, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_munlockall_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_munlockall munlockall "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe2(p *[2]_C_int, flags int) (err error) {
- _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+ _, _, e1 := syscall_rawSyscall(libc_pipe2_trampoline_addr, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_pipe2_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pipe2 pipe2 "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getdents(fd int, buf []byte) (n int, err error) {
@@ -368,7 +484,7 @@ func Getdents(fd int, buf []byte) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall(SYS_GETDENTS, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ r0, _, e1 := syscall_syscall(libc_getdents_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -376,6 +492,10 @@ func Getdents(fd int, buf []byte) (n int, err error) {
return
}
+var libc_getdents_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getdents getdents "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getcwd(buf []byte) (n int, err error) {
@@ -385,7 +505,7 @@ func Getcwd(buf []byte) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall(SYS___GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+ r0, _, e1 := syscall_syscall(libc_getcwd_trampoline_addr, uintptr(_p0), uintptr(len(buf)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -393,16 +513,24 @@ func Getcwd(buf []byte) (n int, err error) {
return
}
+var libc_getcwd_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getcwd getcwd "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ioctl(fd int, req uint, arg uintptr) (err error) {
- _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
+ _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_ioctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
@@ -412,17 +540,21 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr)
} else {
_p0 = unsafe.Pointer(&_zero)
}
- _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
+ _, _, e1 := syscall_syscall6(libc_sysctl_trampoline_addr, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_sysctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_sysctl sysctl "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
+ r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -430,6 +562,10 @@ func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int,
return
}
+var libc_ppoll_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ppoll ppoll "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Access(path string, mode uint32) (err error) {
@@ -438,23 +574,31 @@ func Access(path string, mode uint32) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ _, _, e1 := syscall_syscall(libc_access_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_access_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_access access "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Adjtime(delta *Timeval, olddelta *Timeval) (err error) {
- _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
+ _, _, e1 := syscall_syscall(libc_adjtime_trampoline_addr, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_adjtime_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_adjtime adjtime "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chdir(path string) (err error) {
@@ -463,13 +607,17 @@ func Chdir(path string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ _, _, e1 := syscall_syscall(libc_chdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_chdir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_chdir chdir "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chflags(path string, flags int) (err error) {
@@ -478,13 +626,17 @@ func Chflags(path string, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ _, _, e1 := syscall_syscall(libc_chflags_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_chflags_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_chflags chflags "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chmod(path string, mode uint32) (err error) {
@@ -493,13 +645,17 @@ func Chmod(path string, mode uint32) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ _, _, e1 := syscall_syscall(libc_chmod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_chmod_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_chmod chmod "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chown(path string, uid int, gid int) (err error) {
@@ -508,13 +664,17 @@ func Chown(path string, uid int, gid int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ _, _, e1 := syscall_syscall(libc_chown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_chown_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_chown chown "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Chroot(path string) (err error) {
@@ -523,27 +683,35 @@ func Chroot(path string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ _, _, e1 := syscall_syscall(libc_chroot_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_chroot_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_chroot chroot "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Close(fd int) (err error) {
- _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ _, _, e1 := syscall_syscall(libc_close_trampoline_addr, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_close_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_close close "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup(fd int) (nfd int, err error) {
- r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0)
+ r0, _, e1 := syscall_syscall(libc_dup_trampoline_addr, uintptr(fd), 0, 0)
nfd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -551,33 +719,49 @@ func Dup(fd int) (nfd int, err error) {
return
}
+var libc_dup_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_dup dup "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup2(from int, to int) (err error) {
- _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0)
+ _, _, e1 := syscall_syscall(libc_dup2_trampoline_addr, uintptr(from), uintptr(to), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_dup2_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_dup2 dup2 "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup3(from int, to int, flags int) (err error) {
- _, _, e1 := Syscall(SYS_DUP3, uintptr(from), uintptr(to), uintptr(flags))
+ _, _, e1 := syscall_syscall(libc_dup3_trampoline_addr, uintptr(from), uintptr(to), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_dup3_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_dup3 dup3 "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Exit(code int) {
- Syscall(SYS_EXIT, uintptr(code), 0, 0)
+ syscall_syscall(libc_exit_trampoline_addr, uintptr(code), 0, 0)
return
}
+var libc_exit_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_exit exit "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
@@ -586,43 +770,59 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_faccessat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_faccessat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_faccessat faccessat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchdir(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
+ _, _, e1 := syscall_syscall(libc_fchdir_trampoline_addr, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fchdir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fchdir fchdir "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchflags(fd int, flags int) (err error) {
- _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0)
+ _, _, e1 := syscall_syscall(libc_fchflags_trampoline_addr, uintptr(fd), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fchflags_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fchflags fchflags "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmod(fd int, mode uint32) (err error) {
- _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
+ _, _, e1 := syscall_syscall(libc_fchmod_trampoline_addr, uintptr(fd), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fchmod_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fchmod fchmod "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
@@ -631,23 +831,31 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_fchmodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fchmodat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fchmodat fchmodat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchown(fd int, uid int, gid int) (err error) {
- _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
+ _, _, e1 := syscall_syscall(libc_fchown_trampoline_addr, uintptr(fd), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fchown_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fchown fchown "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
@@ -656,27 +864,35 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+ _, _, e1 := syscall_syscall6(libc_fchownat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fchownat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fchownat fchownat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Flock(fd int, how int) (err error) {
- _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
+ _, _, e1 := syscall_syscall(libc_flock_trampoline_addr, uintptr(fd), uintptr(how), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_flock_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_flock flock "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fpathconf(fd int, name int) (val int, err error) {
- r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0)
+ r0, _, e1 := syscall_syscall(libc_fpathconf_trampoline_addr, uintptr(fd), uintptr(name), 0)
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -684,16 +900,24 @@ func Fpathconf(fd int, name int) (val int, err error) {
return
}
+var libc_fpathconf_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstat(fd int, stat *Stat_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fstat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fstat fstat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
@@ -702,71 +926,99 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_fstatat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fstatat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fstatat fstatat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstatfs(fd int, stat *Statfs_t) (err error) {
- _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ _, _, e1 := syscall_syscall(libc_fstatfs_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fstatfs_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fstatfs fstatfs "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fsync(fd int) (err error) {
- _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
+ _, _, e1 := syscall_syscall(libc_fsync_trampoline_addr, uintptr(fd), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_fsync_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_fsync fsync "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Ftruncate(fd int, length int64) (err error) {
- _, _, e1 := Syscall6(SYS_FTRUNCATE, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_ftruncate_trampoline_addr, uintptr(fd), 0, uintptr(length), uintptr(length>>32), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_ftruncate_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ftruncate ftruncate "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getegid() (egid int) {
- r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_getegid_trampoline_addr, 0, 0, 0)
egid = int(r0)
return
}
+var libc_getegid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getegid getegid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Geteuid() (uid int) {
- r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_geteuid_trampoline_addr, 0, 0, 0)
uid = int(r0)
return
}
+var libc_geteuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_geteuid geteuid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getgid() (gid int) {
- r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_getgid_trampoline_addr, 0, 0, 0)
gid = int(r0)
return
}
+var libc_getgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getgid getgid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgid(pid int) (pgid int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
+ r0, _, e1 := syscall_rawSyscall(libc_getpgid_trampoline_addr, uintptr(pid), 0, 0)
pgid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -774,34 +1026,50 @@ func Getpgid(pid int) (pgid int, err error) {
return
}
+var libc_getpgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getpgid getpgid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpgrp() (pgrp int) {
- r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_getpgrp_trampoline_addr, 0, 0, 0)
pgrp = int(r0)
return
}
+var libc_getpgrp_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getpgrp getpgrp "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpid() (pid int) {
- r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_getpid_trampoline_addr, 0, 0, 0)
pid = int(r0)
return
}
+var libc_getpid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getpid getpid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getppid() (ppid int) {
- r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_getppid_trampoline_addr, 0, 0, 0)
ppid = int(r0)
return
}
+var libc_getppid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getppid getppid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getpriority(which int, who int) (prio int, err error) {
- r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
+ r0, _, e1 := syscall_syscall(libc_getpriority_trampoline_addr, uintptr(which), uintptr(who), 0)
prio = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -809,20 +1077,28 @@ func Getpriority(which int, who int) (prio int, err error) {
return
}
+var libc_getpriority_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getpriority getpriority "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrlimit(which int, lim *Rlimit) (err error) {
- _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+ _, _, e1 := syscall_rawSyscall(libc_getrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_getrlimit_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrtable() (rtable int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0)
+ r0, _, e1 := syscall_rawSyscall(libc_getrtable_trampoline_addr, 0, 0, 0)
rtable = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -830,20 +1106,28 @@ func Getrtable() (rtable int, err error) {
return
}
+var libc_getrtable_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getrtable getrtable "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getrusage(who int, rusage *Rusage) (err error) {
- _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+ _, _, e1 := syscall_rawSyscall(libc_getrusage_trampoline_addr, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_getrusage_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getrusage getrusage "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getsid(pid int) (sid int, err error) {
- r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0)
+ r0, _, e1 := syscall_rawSyscall(libc_getsid_trampoline_addr, uintptr(pid), 0, 0)
sid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -851,46 +1135,66 @@ func Getsid(pid int) (sid int, err error) {
return
}
+var libc_getsid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getsid getsid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Gettimeofday(tv *Timeval) (err error) {
- _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_gettimeofday_trampoline_addr, uintptr(unsafe.Pointer(tv)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_gettimeofday_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_gettimeofday gettimeofday "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Getuid() (uid int) {
- r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
+ r0, _, _ := syscall_rawSyscall(libc_getuid_trampoline_addr, 0, 0, 0)
uid = int(r0)
return
}
+var libc_getuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_getuid getuid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Issetugid() (tainted bool) {
- r0, _, _ := Syscall(SYS_ISSETUGID, 0, 0, 0)
+ r0, _, _ := syscall_syscall(libc_issetugid_trampoline_addr, 0, 0, 0)
tainted = bool(r0 != 0)
return
}
+var libc_issetugid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_issetugid issetugid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kill(pid int, signum syscall.Signal) (err error) {
- _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), 0)
+ _, _, e1 := syscall_syscall(libc_kill_trampoline_addr, uintptr(pid), uintptr(signum), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_kill_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_kill kill "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Kqueue() (fd int, err error) {
- r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0)
+ r0, _, e1 := syscall_syscall(libc_kqueue_trampoline_addr, 0, 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -898,6 +1202,10 @@ func Kqueue() (fd int, err error) {
return
}
+var libc_kqueue_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_kqueue kqueue "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lchown(path string, uid int, gid int) (err error) {
@@ -906,13 +1214,17 @@ func Lchown(path string, uid int, gid int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ _, _, e1 := syscall_syscall(libc_lchown_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_lchown_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_lchown lchown "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Link(path string, link string) (err error) {
@@ -926,13 +1238,17 @@ func Link(path string, link string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ _, _, e1 := syscall_syscall(libc_link_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_link_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_link link "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error) {
@@ -946,23 +1262,31 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_LINKAT, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
+ _, _, e1 := syscall_syscall6(libc_linkat_trampoline_addr, uintptr(pathfd), uintptr(unsafe.Pointer(_p0)), uintptr(linkfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_linkat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_linkat linkat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, backlog int) (err error) {
- _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0)
+ _, _, e1 := syscall_syscall(libc_listen_trampoline_addr, uintptr(s), uintptr(backlog), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_listen_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_listen listen "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Lstat(path string, stat *Stat_t) (err error) {
@@ -971,13 +1295,17 @@ func Lstat(path string, stat *Stat_t) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ _, _, e1 := syscall_syscall(libc_lstat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_lstat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_lstat lstat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdir(path string, mode uint32) (err error) {
@@ -986,13 +1314,17 @@ func Mkdir(path string, mode uint32) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ _, _, e1 := syscall_syscall(libc_mkdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mkdir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mkdir mkdir "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkdirat(dirfd int, path string, mode uint32) (err error) {
@@ -1001,13 +1333,17 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ _, _, e1 := syscall_syscall(libc_mkdirat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mkdirat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mkdirat mkdirat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkfifo(path string, mode uint32) (err error) {
@@ -1016,13 +1352,17 @@ func Mkfifo(path string, mode uint32) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
+ _, _, e1 := syscall_syscall(libc_mkfifo_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mkfifo_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mkfifo mkfifo "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mkfifoat(dirfd int, path string, mode uint32) (err error) {
@@ -1031,13 +1371,17 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_MKFIFOAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ _, _, e1 := syscall_syscall(libc_mkfifoat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mkfifoat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mkfifoat mkfifoat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknod(path string, mode uint32, dev int) (err error) {
@@ -1046,13 +1390,17 @@ func Mknod(path string, mode uint32, dev int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
+ _, _, e1 := syscall_syscall(libc_mknod_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mknod_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mknod mknod "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
@@ -1061,23 +1409,31 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_mknodat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_mknodat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mknodat mknodat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
- _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+ _, _, e1 := syscall_syscall(libc_nanosleep_trampoline_addr, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_nanosleep_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Open(path string, mode int, perm uint32) (fd int, err error) {
@@ -1086,7 +1442,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) {
if err != nil {
return
}
- r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
+ r0, _, e1 := syscall_syscall(libc_open_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1094,6 +1450,10 @@ func Open(path string, mode int, perm uint32) (fd int, err error) {
return
}
+var libc_open_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_open open "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
@@ -1102,7 +1462,7 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
if err != nil {
return
}
- r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0)
+ r0, _, e1 := syscall_syscall6(libc_openat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0)
fd = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1110,6 +1470,10 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) {
return
}
+var libc_openat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_openat openat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pathconf(path string, name int) (val int, err error) {
@@ -1118,7 +1482,7 @@ func Pathconf(path string, name int) (val int, err error) {
if err != nil {
return
}
- r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
+ r0, _, e1 := syscall_syscall(libc_pathconf_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0)
val = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1126,6 +1490,10 @@ func Pathconf(path string, name int) (val int, err error) {
return
}
+var libc_pathconf_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pathconf pathconf "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pread(fd int, p []byte, offset int64) (n int, err error) {
@@ -1135,7 +1503,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32))
+ r0, _, e1 := syscall_syscall6(libc_pread_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1143,6 +1511,10 @@ func pread(fd int, p []byte, offset int64) (n int, err error) {
return
}
+var libc_pread_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pread pread "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pwrite(fd int, p []byte, offset int64) (n int, err error) {
@@ -1152,7 +1524,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32))
+ r0, _, e1 := syscall_syscall6(libc_pwrite_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)), 0, uintptr(offset), uintptr(offset>>32))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1160,6 +1532,10 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) {
return
}
+var libc_pwrite_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pwrite pwrite "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func read(fd int, p []byte) (n int, err error) {
@@ -1169,7 +1545,7 @@ func read(fd int, p []byte) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1177,6 +1553,10 @@ func read(fd int, p []byte) (n int, err error) {
return
}
+var libc_read_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_read read "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Readlink(path string, buf []byte) (n int, err error) {
@@ -1191,7 +1571,7 @@ func Readlink(path string, buf []byte) (n int, err error) {
} else {
_p1 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
+ r0, _, e1 := syscall_syscall(libc_readlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1199,6 +1579,10 @@ func Readlink(path string, buf []byte) (n int, err error) {
return
}
+var libc_readlink_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readlink readlink "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
@@ -1213,7 +1597,7 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
} else {
_p1 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
+ r0, _, e1 := syscall_syscall6(libc_readlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1221,6 +1605,10 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
return
}
+var libc_readlinkat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readlinkat readlinkat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Rename(from string, to string) (err error) {
@@ -1234,13 +1622,17 @@ func Rename(from string, to string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ _, _, e1 := syscall_syscall(libc_rename_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_rename_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_rename rename "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Renameat(fromfd int, from string, tofd int, to string) (err error) {
@@ -1254,13 +1646,17 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_renameat_trampoline_addr, uintptr(fromfd), uintptr(unsafe.Pointer(_p0)), uintptr(tofd), uintptr(unsafe.Pointer(_p1)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_renameat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_renameat renameat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Revoke(path string) (err error) {
@@ -1269,13 +1665,17 @@ func Revoke(path string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ _, _, e1 := syscall_syscall(libc_revoke_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_revoke_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_revoke revoke "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Rmdir(path string) (err error) {
@@ -1284,17 +1684,21 @@ func Rmdir(path string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ _, _, e1 := syscall_syscall(libc_rmdir_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_rmdir_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_rmdir rmdir "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
- r0, r1, e1 := Syscall6(SYS_LSEEK, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0)
+ r0, r1, e1 := syscall_syscall6(libc_lseek_trampoline_addr, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(whence), 0)
newoffset = int64(int64(r1)<<32 | int64(r0))
if e1 != 0 {
err = errnoErr(e1)
@@ -1302,10 +1706,14 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
return
}
+var libc_lseek_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_lseek lseek "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
- r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+ r0, _, e1 := syscall_syscall6(libc_select_trampoline_addr, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1313,36 +1721,52 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
return
}
+var libc_select_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_select select "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setegid(egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETEGID, uintptr(egid), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_setegid_trampoline_addr, uintptr(egid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setegid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setegid setegid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Seteuid(euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_seteuid_trampoline_addr, uintptr(euid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_seteuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_seteuid seteuid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setgid(gid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_setgid_trampoline_addr, uintptr(gid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setgid setgid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setlogin(name string) (err error) {
@@ -1351,97 +1775,133 @@ func Setlogin(name string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ _, _, e1 := syscall_syscall(libc_setlogin_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setlogin_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setlogin setlogin "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpgid(pid int, pgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
+ _, _, e1 := syscall_rawSyscall(libc_setpgid_trampoline_addr, uintptr(pid), uintptr(pgid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setpgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setpgid setpgid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setpriority(which int, who int, prio int) (err error) {
- _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
+ _, _, e1 := syscall_syscall(libc_setpriority_trampoline_addr, uintptr(which), uintptr(who), uintptr(prio))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setpriority_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setpriority setpriority "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setregid(rgid int, egid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
+ _, _, e1 := syscall_rawSyscall(libc_setregid_trampoline_addr, uintptr(rgid), uintptr(egid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setregid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setregid setregid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setreuid(ruid int, euid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
+ _, _, e1 := syscall_rawSyscall(libc_setreuid_trampoline_addr, uintptr(ruid), uintptr(euid), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setreuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setreuid setreuid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresgid(rgid int, egid int, sgid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
+ _, _, e1 := syscall_rawSyscall(libc_setresgid_trampoline_addr, uintptr(rgid), uintptr(egid), uintptr(sgid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setresgid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setresgid setresgid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setresuid(ruid int, euid int, suid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
+ _, _, e1 := syscall_rawSyscall(libc_setresuid_trampoline_addr, uintptr(ruid), uintptr(euid), uintptr(suid))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setresuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setresuid setresuid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setrlimit(which int, lim *Rlimit) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
+ _, _, e1 := syscall_rawSyscall(libc_setrlimit_trampoline_addr, uintptr(which), uintptr(unsafe.Pointer(lim)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setrlimit_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setrlimit setrlimit "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setrtable(rtable int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_setrtable_trampoline_addr, uintptr(rtable), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setrtable_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setrtable setrtable "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setsid() (pid int, err error) {
- r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
+ r0, _, e1 := syscall_rawSyscall(libc_setsid_trampoline_addr, 0, 0, 0)
pid = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1449,26 +1909,38 @@ func Setsid() (pid int, err error) {
return
}
+var libc_setsid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setsid setsid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Settimeofday(tp *Timeval) (err error) {
- _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_settimeofday_trampoline_addr, uintptr(unsafe.Pointer(tp)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_settimeofday_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_settimeofday settimeofday "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Setuid(uid int) (err error) {
- _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0)
+ _, _, e1 := syscall_rawSyscall(libc_setuid_trampoline_addr, uintptr(uid), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_setuid_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_setuid setuid "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Stat(path string, stat *Stat_t) (err error) {
@@ -1477,13 +1949,17 @@ func Stat(path string, stat *Stat_t) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ _, _, e1 := syscall_syscall(libc_stat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_stat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_stat stat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Statfs(path string, stat *Statfs_t) (err error) {
@@ -1492,13 +1968,17 @@ func Statfs(path string, stat *Statfs_t) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ _, _, e1 := syscall_syscall(libc_statfs_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_statfs_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_statfs statfs "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Symlink(path string, link string) (err error) {
@@ -1512,13 +1992,17 @@ func Symlink(path string, link string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ _, _, e1 := syscall_syscall(libc_symlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_symlink_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_symlink symlink "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
@@ -1532,23 +2016,31 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
+ _, _, e1 := syscall_syscall(libc_symlinkat_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_symlinkat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_symlinkat symlinkat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Sync() (err error) {
- _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0)
+ _, _, e1 := syscall_syscall(libc_sync_trampoline_addr, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_sync_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_sync sync "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Truncate(path string, length int64) (err error) {
@@ -1557,21 +2049,29 @@ func Truncate(path string, length int64) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_truncate_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, uintptr(length), uintptr(length>>32), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_truncate_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_truncate truncate "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Umask(newmask int) (oldmask int) {
- r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0)
+ r0, _, _ := syscall_syscall(libc_umask_trampoline_addr, uintptr(newmask), 0, 0)
oldmask = int(r0)
return
}
+var libc_umask_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_umask umask "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unlink(path string) (err error) {
@@ -1580,13 +2080,17 @@ func Unlink(path string) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ _, _, e1 := syscall_syscall(libc_unlink_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_unlink_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unlink unlink "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unlinkat(dirfd int, path string, flags int) (err error) {
@@ -1595,13 +2099,17 @@ func Unlinkat(dirfd int, path string, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+ _, _, e1 := syscall_syscall(libc_unlinkat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_unlinkat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unlinkat unlinkat "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Unmount(path string, flags int) (err error) {
@@ -1610,13 +2118,17 @@ func Unmount(path string, flags int) (err error) {
if err != nil {
return
}
- _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ _, _, e1 := syscall_syscall(libc_unmount_trampoline_addr, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_unmount_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_unmount unmount "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func write(fd int, p []byte) (n int, err error) {
@@ -1626,7 +2138,7 @@ func write(fd int, p []byte) (n int, err error) {
} else {
_p0 = unsafe.Pointer(&_zero)
}
- r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1634,10 +2146,14 @@ func write(fd int, p []byte) (n int, err error) {
return
}
+var libc_write_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_write write "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) {
- r0, _, e1 := Syscall9(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0)
+ r0, _, e1 := syscall_syscall9(libc_mmap_trampoline_addr, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), 0, uintptr(pos), uintptr(pos>>32), 0)
ret = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1645,20 +2161,28 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (
return
}
+var libc_mmap_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_mmap mmap "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func munmap(addr uintptr, length uintptr) (err error) {
- _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
+ _, _, e1 := syscall_syscall(libc_munmap_trampoline_addr, uintptr(addr), uintptr(length), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+var libc_munmap_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_munmap munmap "libc.so"
+
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+ r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1669,7 +2193,7 @@ func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
- r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
+ r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
@@ -1685,9 +2209,13 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error
if err != nil {
return
}
- _, _, e1 := Syscall6(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
+ _, _, e1 := syscall_syscall6(libc_utimensat_trampoline_addr, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flags), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
+
+var libc_utimensat_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_utimensat utimensat "libc.so"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
new file mode 100644
index 00000000000..9ad116d9fbd
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s
@@ -0,0 +1,796 @@
+// go run mkasm.go openbsd arm
+// Code generated by the command above; DO NOT EDIT.
+
+#include "textflag.h"
+
+TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getgroups(SB)
+
+GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB)
+
+TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setgroups(SB)
+
+GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB)
+
+TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_wait4(SB)
+
+GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4
+DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB)
+
+TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_accept(SB)
+
+GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4
+DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB)
+
+TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_bind(SB)
+
+GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4
+DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB)
+
+TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_connect(SB)
+
+GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4
+DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB)
+
+TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_socket(SB)
+
+GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4
+DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB)
+
+TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getsockopt(SB)
+
+GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB)
+
+TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setsockopt(SB)
+
+GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB)
+
+TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getpeername(SB)
+
+GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB)
+
+TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getsockname(SB)
+
+GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB)
+
+TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_shutdown(SB)
+
+GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4
+DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB)
+
+TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_socketpair(SB)
+
+GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4
+DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB)
+
+TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_recvfrom(SB)
+
+GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4
+DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB)
+
+TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_sendto(SB)
+
+GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4
+DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB)
+
+TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_recvmsg(SB)
+
+GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4
+DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB)
+
+TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_sendmsg(SB)
+
+GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4
+DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB)
+
+TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_kevent(SB)
+
+GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4
+DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB)
+
+TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_utimes(SB)
+
+GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4
+DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB)
+
+TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_futimes(SB)
+
+GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4
+DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB)
+
+TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_poll(SB)
+
+GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4
+DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB)
+
+TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_madvise(SB)
+
+GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4
+DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB)
+
+TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mlock(SB)
+
+GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4
+DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB)
+
+TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mlockall(SB)
+
+GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4
+DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB)
+
+TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mprotect(SB)
+
+GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4
+DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB)
+
+TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_msync(SB)
+
+GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4
+DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB)
+
+TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_munlock(SB)
+
+GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4
+DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB)
+
+TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_munlockall(SB)
+
+GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4
+DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB)
+
+TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pipe2(SB)
+
+GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4
+DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB)
+
+TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getdents(SB)
+
+GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB)
+
+TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getcwd(SB)
+
+GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB)
+
+TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_ioctl(SB)
+
+GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4
+DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB)
+
+TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_sysctl(SB)
+
+GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4
+DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB)
+
+TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_ppoll(SB)
+
+GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4
+DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB)
+
+TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_access(SB)
+
+GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4
+DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB)
+
+TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_adjtime(SB)
+
+GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4
+DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB)
+
+TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_chdir(SB)
+
+GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4
+DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB)
+
+TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_chflags(SB)
+
+GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4
+DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB)
+
+TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_chmod(SB)
+
+GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4
+DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB)
+
+TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_chown(SB)
+
+GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4
+DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB)
+
+TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_chroot(SB)
+
+GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4
+DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB)
+
+TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_close(SB)
+
+GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4
+DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB)
+
+TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_dup(SB)
+
+GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4
+DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB)
+
+TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_dup2(SB)
+
+GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4
+DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB)
+
+TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_dup3(SB)
+
+GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4
+DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB)
+
+TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_exit(SB)
+
+GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4
+DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB)
+
+TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_faccessat(SB)
+
+GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB)
+
+TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fchdir(SB)
+
+GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4
+DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB)
+
+TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fchflags(SB)
+
+GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4
+DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB)
+
+TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fchmod(SB)
+
+GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4
+DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB)
+
+TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fchmodat(SB)
+
+GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB)
+
+TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fchown(SB)
+
+GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4
+DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB)
+
+TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fchownat(SB)
+
+GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB)
+
+TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_flock(SB)
+
+GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4
+DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB)
+
+TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fpathconf(SB)
+
+GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4
+DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB)
+
+TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fstat(SB)
+
+GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB)
+
+TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fstatat(SB)
+
+GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB)
+
+TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fstatfs(SB)
+
+GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4
+DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB)
+
+TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_fsync(SB)
+
+GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4
+DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB)
+
+TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_ftruncate(SB)
+
+GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4
+DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB)
+
+TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getegid(SB)
+
+GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB)
+
+TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_geteuid(SB)
+
+GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB)
+
+TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getgid(SB)
+
+GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB)
+
+TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getpgid(SB)
+
+GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB)
+
+TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getpgrp(SB)
+
+GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB)
+
+TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getpid(SB)
+
+GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB)
+
+TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getppid(SB)
+
+GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB)
+
+TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getpriority(SB)
+
+GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB)
+
+TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getrlimit(SB)
+
+GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB)
+
+TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getrtable(SB)
+
+GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB)
+
+TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getrusage(SB)
+
+GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB)
+
+TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getsid(SB)
+
+GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB)
+
+TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_gettimeofday(SB)
+
+GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4
+DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB)
+
+TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_getuid(SB)
+
+GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB)
+
+TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_issetugid(SB)
+
+GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB)
+
+TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_kill(SB)
+
+GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4
+DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB)
+
+TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_kqueue(SB)
+
+GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4
+DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB)
+
+TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_lchown(SB)
+
+GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4
+DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB)
+
+TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_link(SB)
+
+GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4
+DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB)
+
+TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_linkat(SB)
+
+GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB)
+
+TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_listen(SB)
+
+GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4
+DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB)
+
+TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_lstat(SB)
+
+GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB)
+
+TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mkdir(SB)
+
+GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4
+DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB)
+
+TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mkdirat(SB)
+
+GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB)
+
+TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mkfifo(SB)
+
+GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4
+DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB)
+
+TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mkfifoat(SB)
+
+GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB)
+
+TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mknod(SB)
+
+GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4
+DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB)
+
+TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mknodat(SB)
+
+GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB)
+
+TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_nanosleep(SB)
+
+GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4
+DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB)
+
+TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_open(SB)
+
+GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4
+DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB)
+
+TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_openat(SB)
+
+GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB)
+
+TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pathconf(SB)
+
+GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4
+DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB)
+
+TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pread(SB)
+
+GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4
+DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB)
+
+TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pwrite(SB)
+
+GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4
+DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB)
+
+TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_read(SB)
+
+GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4
+DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB)
+
+TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readlink(SB)
+
+GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4
+DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB)
+
+TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readlinkat(SB)
+
+GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB)
+
+TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_rename(SB)
+
+GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4
+DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB)
+
+TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_renameat(SB)
+
+GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB)
+
+TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_revoke(SB)
+
+GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4
+DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB)
+
+TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_rmdir(SB)
+
+GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4
+DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB)
+
+TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_lseek(SB)
+
+GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4
+DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB)
+
+TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_select(SB)
+
+GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4
+DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB)
+
+TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setegid(SB)
+
+GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB)
+
+TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_seteuid(SB)
+
+GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB)
+
+TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setgid(SB)
+
+GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB)
+
+TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setlogin(SB)
+
+GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB)
+
+TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setpgid(SB)
+
+GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB)
+
+TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setpriority(SB)
+
+GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB)
+
+TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setregid(SB)
+
+GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB)
+
+TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setreuid(SB)
+
+GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB)
+
+TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setresgid(SB)
+
+GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB)
+
+TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setresuid(SB)
+
+GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB)
+
+TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setrlimit(SB)
+
+GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB)
+
+TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setrtable(SB)
+
+GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB)
+
+TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setsid(SB)
+
+GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB)
+
+TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_settimeofday(SB)
+
+GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4
+DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB)
+
+TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_setuid(SB)
+
+GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4
+DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB)
+
+TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_stat(SB)
+
+GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB)
+
+TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_statfs(SB)
+
+GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4
+DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB)
+
+TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_symlink(SB)
+
+GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4
+DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB)
+
+TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_symlinkat(SB)
+
+GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB)
+
+TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_sync(SB)
+
+GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4
+DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB)
+
+TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_truncate(SB)
+
+GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4
+DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB)
+
+TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_umask(SB)
+
+GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4
+DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB)
+
+TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_unlink(SB)
+
+GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4
+DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB)
+
+TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_unlinkat(SB)
+
+GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB)
+
+TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_unmount(SB)
+
+GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4
+DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB)
+
+TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_write(SB)
+
+GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4
+DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB)
+
+TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_mmap(SB)
+
+GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4
+DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB)
+
+TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_munmap(SB)
+
+GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4
+DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB)
+
+TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_utimensat(SB)
+
+GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4
+DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
index 467971eed66..f59b18a9779 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_arm.go
@@ -6,6 +6,7 @@
package unix
+// Deprecated: Use libc wrappers instead of direct syscalls.
const (
SYS_EXIT = 1 // { void sys_exit(int rval); }
SYS_FORK = 2 // { int sys_fork(void); }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
index dea0c9a607d..d9c78cdcbc4 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
@@ -294,7 +294,7 @@ type PtraceLwpInfoStruct struct {
Flags int32
Sigmask Sigset_t
Siglist Sigset_t
- Siginfo __Siginfo
+ Siginfo __PtraceSiginfo
Tdname [20]int8
Child_pid int32
Syscall_code uint32
@@ -312,6 +312,17 @@ type __Siginfo struct {
Value [4]byte
_ [32]byte
}
+type __PtraceSiginfo struct {
+ Signo int32
+ Errno int32
+ Code int32
+ Pid int32
+ Uid uint32
+ Status int32
+ Addr uintptr
+ Value [4]byte
+ _ [32]byte
+}
type Sigset_t struct {
Val [4]uint32
@@ -350,8 +361,8 @@ type FpExtendedPrecision struct{}
type PtraceIoDesc struct {
Op int32
- Offs *byte
- Addr *byte
+ Offs uintptr
+ Addr uintptr
Len uint32
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
index da0ea0d608a..26991b16559 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
@@ -291,7 +291,7 @@ type PtraceLwpInfoStruct struct {
Flags int32
Sigmask Sigset_t
Siglist Sigset_t
- Siginfo __Siginfo
+ Siginfo __PtraceSiginfo
Tdname [20]int8
Child_pid int32
Syscall_code uint32
@@ -310,6 +310,18 @@ type __Siginfo struct {
_ [40]byte
}
+type __PtraceSiginfo struct {
+ Signo int32
+ Errno int32
+ Code int32
+ Pid int32
+ Uid uint32
+ Status int32
+ Addr uintptr
+ Value [8]byte
+ _ [40]byte
+}
+
type Sigset_t struct {
Val [4]uint32
}
@@ -354,8 +366,8 @@ type FpExtendedPrecision struct{}
type PtraceIoDesc struct {
Op int32
- Offs *byte
- Addr *byte
+ Offs uintptr
+ Addr uintptr
Len uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
index da8f7404509..f8324e7e7f4 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
@@ -293,7 +293,7 @@ type PtraceLwpInfoStruct struct {
Flags int32
Sigmask Sigset_t
Siglist Sigset_t
- Siginfo __Siginfo
+ Siginfo __PtraceSiginfo
Tdname [20]int8
Child_pid int32
Syscall_code uint32
@@ -312,6 +312,18 @@ type __Siginfo struct {
_ [32]byte
}
+type __PtraceSiginfo struct {
+ Signo int32
+ Errno int32
+ Code int32
+ Pid int32
+ Uid uint32
+ Status int32
+ Addr uintptr
+ Value [4]byte
+ _ [32]byte
+}
+
type Sigset_t struct {
Val [4]uint32
}
@@ -337,8 +349,8 @@ type FpExtendedPrecision struct {
type PtraceIoDesc struct {
Op int32
- Offs *byte
- Addr *byte
+ Offs uintptr
+ Addr uintptr
Len uint32
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
index d69988e5e58..4220411f341 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go
@@ -291,7 +291,7 @@ type PtraceLwpInfoStruct struct {
Flags int32
Sigmask Sigset_t
Siglist Sigset_t
- Siginfo __Siginfo
+ Siginfo __PtraceSiginfo
Tdname [20]int8
Child_pid int32
Syscall_code uint32
@@ -310,6 +310,18 @@ type __Siginfo struct {
_ [40]byte
}
+type __PtraceSiginfo struct {
+ Signo int32
+ Errno int32
+ Code int32
+ Pid int32
+ Uid uint32
+ Status int32
+ Addr uintptr
+ Value [8]byte
+ _ [40]byte
+}
+
type Sigset_t struct {
Val [4]uint32
}
@@ -334,8 +346,8 @@ type FpExtendedPrecision struct{}
type PtraceIoDesc struct {
Op int32
- Offs *byte
- Addr *byte
+ Offs uintptr
+ Addr uintptr
Len uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
index d6fd9e88382..0660fd45c7c 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go
@@ -291,7 +291,7 @@ type PtraceLwpInfoStruct struct {
Flags int32
Sigmask Sigset_t
Siglist Sigset_t
- Siginfo __Siginfo
+ Siginfo __PtraceSiginfo
Tdname [20]int8
Child_pid int32
Syscall_code uint32
@@ -310,6 +310,18 @@ type __Siginfo struct {
_ [40]byte
}
+type __PtraceSiginfo struct {
+ Signo int32
+ Errno int32
+ Code int32
+ Pid int32
+ Uid uint32
+ Status int32
+ Addr uintptr
+ Value [8]byte
+ _ [40]byte
+}
+
type Sigset_t struct {
Val [4]uint32
}
@@ -335,8 +347,8 @@ type FpExtendedPrecision struct{}
type PtraceIoDesc struct {
Op int32
- Offs *byte
- Addr *byte
+ Offs uintptr
+ Addr uintptr
Len uint64
}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index 2636044018f..89c516a29ac 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -254,6 +254,12 @@ type Sigset_t struct {
const _C__NSIG = 0x41
+const (
+ SIG_BLOCK = 0x0
+ SIG_UNBLOCK = 0x1
+ SIG_SETMASK = 0x2
+)
+
type Siginfo struct {
Signo int32
Errno int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index 8187489d16f..62b4fb26996 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -269,6 +269,12 @@ type Sigset_t struct {
const _C__NSIG = 0x41
+const (
+ SIG_BLOCK = 0x0
+ SIG_UNBLOCK = 0x1
+ SIG_SETMASK = 0x2
+)
+
type Siginfo struct {
Signo int32
Errno int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index d1612335f4e..e86b35893ec 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -245,6 +245,12 @@ type Sigset_t struct {
const _C__NSIG = 0x41
+const (
+ SIG_BLOCK = 0x0
+ SIG_UNBLOCK = 0x1
+ SIG_SETMASK = 0x2
+)
+
type Siginfo struct {
Signo int32
Errno int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index c28e5556b0a..6c6be4c911d 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -248,6 +248,12 @@ type Sigset_t struct {
const _C__NSIG = 0x41
+const (
+ SIG_BLOCK = 0x0
+ SIG_UNBLOCK = 0x1
+ SIG_SETMASK = 0x2
+)
+
type Siginfo struct {
Signo int32
Errno int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
index 187061f9f86..4982ea355a2 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -249,6 +249,12 @@ type Sigset_t struct {
const _C__NSIG = 0x41
+const (
+ SIG_BLOCK = 0x0
+ SIG_UNBLOCK = 0x1
+ SIG_SETMASK = 0x2
+)
+
type Siginfo struct {
Signo int32
Errno int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 369129917ab..173141a6703 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -250,6 +250,12 @@ type Sigset_t struct {
const _C__NSIG = 0x80
+const (
+ SIG_BLOCK = 0x1
+ SIG_UNBLOCK = 0x2
+ SIG_SETMASK = 0x3
+)
+
type Siginfo struct {
Signo int32
Code int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 7473468d717..93ae4c51673 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -251,6 +251,12 @@ type Sigset_t struct {
const _C__NSIG = 0x80
+const (
+ SIG_BLOCK = 0x1
+ SIG_UNBLOCK = 0x2
+ SIG_SETMASK = 0x3
+)
+
type Siginfo struct {
Signo int32
Code int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index ed9448524b8..4e4e510ca51 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -251,6 +251,12 @@ type Sigset_t struct {
const _C__NSIG = 0x80
+const (
+ SIG_BLOCK = 0x1
+ SIG_UNBLOCK = 0x2
+ SIG_SETMASK = 0x3
+)
+
type Siginfo struct {
Signo int32
Code int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index 0892a73a4b7..3f5ba013d99 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -250,6 +250,12 @@ type Sigset_t struct {
const _C__NSIG = 0x80
+const (
+ SIG_BLOCK = 0x1
+ SIG_UNBLOCK = 0x2
+ SIG_SETMASK = 0x3
+)
+
type Siginfo struct {
Signo int32
Code int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
index e1dd4833332..71dfe7cdb47 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
@@ -257,6 +257,12 @@ type Sigset_t struct {
const _C__NSIG = 0x41
+const (
+ SIG_BLOCK = 0x0
+ SIG_UNBLOCK = 0x1
+ SIG_SETMASK = 0x2
+)
+
type Siginfo struct {
Signo int32
Errno int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index d9f654c7b12..3a2b7f0a666 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -258,6 +258,12 @@ type Sigset_t struct {
const _C__NSIG = 0x41
+const (
+ SIG_BLOCK = 0x0
+ SIG_UNBLOCK = 0x1
+ SIG_SETMASK = 0x2
+)
+
type Siginfo struct {
Signo int32
Errno int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 74acda9fe44..a52d6275632 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -258,6 +258,12 @@ type Sigset_t struct {
const _C__NSIG = 0x41
+const (
+ SIG_BLOCK = 0x0
+ SIG_UNBLOCK = 0x1
+ SIG_SETMASK = 0x2
+)
+
type Siginfo struct {
Signo int32
Errno int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index 50ebe69ebc0..dfc007d8a69 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -276,6 +276,12 @@ type Sigset_t struct {
const _C__NSIG = 0x41
+const (
+ SIG_BLOCK = 0x0
+ SIG_UNBLOCK = 0x1
+ SIG_SETMASK = 0x2
+)
+
type Siginfo struct {
Signo int32
Errno int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index 75b34c25993..b53cb9103d3 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -271,6 +271,12 @@ type Sigset_t struct {
const _C__NSIG = 0x41
+const (
+ SIG_BLOCK = 0x0
+ SIG_UNBLOCK = 0x1
+ SIG_SETMASK = 0x2
+)
+
type Siginfo struct {
Signo int32
Errno int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 429c3bf7dd8..fe0aa354728 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -253,6 +253,12 @@ type Sigset_t struct {
const _C__NSIG = 0x41
+const (
+ SIG_BLOCK = 0x1
+ SIG_UNBLOCK = 0x2
+ SIG_SETMASK = 0x4
+)
+
type Siginfo struct {
Signo int32
Errno int32
diff --git a/vendor/golang.org/x/sys/windows/setupapi_windows.go b/vendor/golang.org/x/sys/windows/setupapi_windows.go
index 14027da3f3f..f8126482fa5 100644
--- a/vendor/golang.org/x/sys/windows/setupapi_windows.go
+++ b/vendor/golang.org/x/sys/windows/setupapi_windows.go
@@ -296,7 +296,7 @@ const (
// Flag to indicate that the sorting from the INF file should be used.
DI_INF_IS_SORTED DI_FLAGS = 0x00008000
- // Flag to indicate that only the the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched.
+ // Flag to indicate that only the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched.
DI_ENUMSINGLEINF DI_FLAGS = 0x00010000
// Flag that prevents ConfigMgr from removing/re-enumerating devices during device
diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go
index 72074d582f1..8732cdb957f 100644
--- a/vendor/golang.org/x/sys/windows/syscall.go
+++ b/vendor/golang.org/x/sys/windows/syscall.go
@@ -30,8 +30,6 @@ import (
"strings"
"syscall"
"unsafe"
-
- "golang.org/x/sys/internal/unsafeheader"
)
// ByteSliceFromString returns a NUL-terminated slice of bytes
@@ -83,13 +81,7 @@ func BytePtrToString(p *byte) string {
ptr = unsafe.Pointer(uintptr(ptr) + 1)
}
- var s []byte
- h := (*unsafeheader.Slice)(unsafe.Pointer(&s))
- h.Data = unsafe.Pointer(p)
- h.Len = n
- h.Cap = n
-
- return string(s)
+ return string(unsafe.Slice(p, n))
}
// Single-word zero for use when we need a valid pointer to 0 bytes.
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index e27913817a7..5f4f0430e99 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -138,13 +138,7 @@ func UTF16PtrToString(p *uint16) string {
ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p))
}
- var s []uint16
- h := (*unsafeheader.Slice)(unsafe.Pointer(&s))
- h.Data = unsafe.Pointer(p)
- h.Len = n
- h.Cap = n
-
- return string(utf16.Decode(s))
+ return string(utf16.Decode(unsafe.Slice(p, n)))
}
func Getpagesize() int { return 4096 }
@@ -364,6 +358,15 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error)
//sys GetActiveProcessorCount(groupNumber uint16) (ret uint32)
//sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32)
+//sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows
+//sys EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) = user32.EnumChildWindows
+//sys GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) = user32.GetClassNameW
+//sys GetDesktopWindow() (hwnd HWND) = user32.GetDesktopWindow
+//sys GetForegroundWindow() (hwnd HWND) = user32.GetForegroundWindow
+//sys IsWindow(hwnd HWND) (isWindow bool) = user32.IsWindow
+//sys IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode
+//sys IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible
+//sys GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo
// Volume Management Functions
//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW
@@ -439,6 +442,10 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) = ntdll.RtlAddFunctionTable
//sys RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) = ntdll.RtlDeleteFunctionTable
+// Desktop Window Manager API (Dwmapi)
+//sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute
+//sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute
+
// syscall interface implementation for other packages
// GetCurrentProcess returns the handle for the current process.
@@ -1108,9 +1115,13 @@ func Shutdown(fd Handle, how int) (err error) {
}
func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) {
- rsa, l, err := to.sockaddr()
- if err != nil {
- return err
+ var rsa unsafe.Pointer
+ var l int32
+ if to != nil {
+ rsa, l, err = to.sockaddr()
+ if err != nil {
+ return err
+ }
}
return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine)
}
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index f9eaca528ed..0c4add97410 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -3213,3 +3213,48 @@ type ModuleInfo struct {
}
const ALL_PROCESSOR_GROUPS = 0xFFFF
+
+type Rect struct {
+ Left int32
+ Top int32
+ Right int32
+ Bottom int32
+}
+
+type GUIThreadInfo struct {
+ Size uint32
+ Flags uint32
+ Active HWND
+ Focus HWND
+ Capture HWND
+ MenuOwner HWND
+ MoveSize HWND
+ CaretHandle HWND
+ CaretRect Rect
+}
+
+const (
+ DWMWA_NCRENDERING_ENABLED = 1
+ DWMWA_NCRENDERING_POLICY = 2
+ DWMWA_TRANSITIONS_FORCEDISABLED = 3
+ DWMWA_ALLOW_NCPAINT = 4
+ DWMWA_CAPTION_BUTTON_BOUNDS = 5
+ DWMWA_NONCLIENT_RTL_LAYOUT = 6
+ DWMWA_FORCE_ICONIC_REPRESENTATION = 7
+ DWMWA_FLIP3D_POLICY = 8
+ DWMWA_EXTENDED_FRAME_BOUNDS = 9
+ DWMWA_HAS_ICONIC_BITMAP = 10
+ DWMWA_DISALLOW_PEEK = 11
+ DWMWA_EXCLUDED_FROM_PEEK = 12
+ DWMWA_CLOAK = 13
+ DWMWA_CLOAKED = 14
+ DWMWA_FREEZE_REPRESENTATION = 15
+ DWMWA_PASSIVE_UPDATE_MODE = 16
+ DWMWA_USE_HOSTBACKDROPBRUSH = 17
+ DWMWA_USE_IMMERSIVE_DARK_MODE = 20
+ DWMWA_WINDOW_CORNER_PREFERENCE = 33
+ DWMWA_BORDER_COLOR = 34
+ DWMWA_CAPTION_COLOR = 35
+ DWMWA_TEXT_COLOR = 36
+ DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37
+)
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 52d4742cb94..96ba8559c37 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -40,6 +40,7 @@ var (
modadvapi32 = NewLazySystemDLL("advapi32.dll")
modcrypt32 = NewLazySystemDLL("crypt32.dll")
moddnsapi = NewLazySystemDLL("dnsapi.dll")
+ moddwmapi = NewLazySystemDLL("dwmapi.dll")
modiphlpapi = NewLazySystemDLL("iphlpapi.dll")
modkernel32 = NewLazySystemDLL("kernel32.dll")
modmswsock = NewLazySystemDLL("mswsock.dll")
@@ -175,6 +176,8 @@ var (
procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W")
procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W")
procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree")
+ procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute")
+ procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute")
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo")
procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx")
@@ -444,9 +447,18 @@ var (
procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW")
procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath")
procShellExecuteW = modshell32.NewProc("ShellExecuteW")
+ procEnumChildWindows = moduser32.NewProc("EnumChildWindows")
+ procEnumWindows = moduser32.NewProc("EnumWindows")
procExitWindowsEx = moduser32.NewProc("ExitWindowsEx")
+ procGetClassNameW = moduser32.NewProc("GetClassNameW")
+ procGetDesktopWindow = moduser32.NewProc("GetDesktopWindow")
+ procGetForegroundWindow = moduser32.NewProc("GetForegroundWindow")
+ procGetGUIThreadInfo = moduser32.NewProc("GetGUIThreadInfo")
procGetShellWindow = moduser32.NewProc("GetShellWindow")
procGetWindowThreadProcessId = moduser32.NewProc("GetWindowThreadProcessId")
+ procIsWindow = moduser32.NewProc("IsWindow")
+ procIsWindowUnicode = moduser32.NewProc("IsWindowUnicode")
+ procIsWindowVisible = moduser32.NewProc("IsWindowVisible")
procMessageBoxW = moduser32.NewProc("MessageBoxW")
procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock")
procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock")
@@ -1525,6 +1537,22 @@ func DnsRecordListFree(rl *DNSRecord, freetype uint32) {
return
}
+func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
+ r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0)
+ if r0 != 0 {
+ ret = syscall.Errno(r0)
+ }
+ return
+}
+
+func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
+ r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0)
+ if r0 != 0 {
+ ret = syscall.Errno(r0)
+ }
+ return
+}
+
func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
if r0 != 0 {
@@ -3802,6 +3830,19 @@ func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *ui
return
}
+func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) {
+ syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param))
+ return
+}
+
+func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
+ r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func ExitWindowsEx(flags uint32, reason uint32) (err error) {
r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0)
if r1 == 0 {
@@ -3810,6 +3851,35 @@ func ExitWindowsEx(flags uint32, reason uint32) (err error) {
return
}
+func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) {
+ r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
+ copied = int32(r0)
+ if copied == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetDesktopWindow() (hwnd HWND) {
+ r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0)
+ hwnd = HWND(r0)
+ return
+}
+
+func GetForegroundWindow() (hwnd HWND) {
+ r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0)
+ hwnd = HWND(r0)
+ return
+}
+
+func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
+ r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0)
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func GetShellWindow() (shellWindow HWND) {
r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0)
shellWindow = HWND(r0)
@@ -3825,6 +3895,24 @@ func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
return
}
+func IsWindow(hwnd HWND) (isWindow bool) {
+ r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0)
+ isWindow = r0 != 0
+ return
+}
+
+func IsWindowUnicode(hwnd HWND) (isUnicode bool) {
+ r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0)
+ isUnicode = r0 != 0
+ return
+}
+
+func IsWindowVisible(hwnd HWND) (isVisible bool) {
+ r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0)
+ isVisible = r0 != 0
+ return
+}
+
func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) {
r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0)
ret = int32(r0)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2e07d8a8302..e6a32a47fb9 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -33,36 +33,45 @@ github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-02-01/resources
github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-05-01/resources
github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage
github.com/Azure/azure-sdk-for-go/version
-# github.com/Azure/azure-sdk-for-go/sdk/azcore v0.21.1
-## explicit; go 1.16
+# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.3
+## explicit; go 1.18
github.com/Azure/azure-sdk-for-go/sdk/azcore
-github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pipeline
+github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud
+github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported
+github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log
github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers
+github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async
+github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body
github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc
github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op
github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared
+github.com/Azure/azure-sdk-for-go/sdk/azcore/log
github.com/Azure/azure-sdk-for-go/sdk/azcore/policy
github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime
github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming
github.com/Azure/azure-sdk-for-go/sdk/azcore/to
-# github.com/Azure/azure-sdk-for-go/sdk/internal v0.9.2
-## explicit; go 1.16
+# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0
+## explicit; go 1.18
+github.com/Azure/azure-sdk-for-go/sdk/azidentity
+# github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1
+## explicit; go 1.18
github.com/Azure/azure-sdk-for-go/sdk/internal/diag
github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo
github.com/Azure/azure-sdk-for-go/sdk/internal/log
+github.com/Azure/azure-sdk-for-go/sdk/internal/temporal
github.com/Azure/azure-sdk-for-go/sdk/internal/uuid
-# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0
-## explicit; go 1.16
+# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1
+## explicit; go 1.18
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal
# github.com/Azure/go-autorest v14.2.0+incompatible
## explicit
github.com/Azure/go-autorest
-# github.com/Azure/go-autorest/autorest v0.11.27
+# github.com/Azure/go-autorest/autorest v0.11.28
## explicit; go 1.15
github.com/Azure/go-autorest/autorest
github.com/Azure/go-autorest/autorest/azure
-# github.com/Azure/go-autorest/autorest/adal v0.9.20
+# github.com/Azure/go-autorest/autorest/adal v0.9.21
## explicit; go 1.15
github.com/Azure/go-autorest/autorest/adal
# github.com/Azure/go-autorest/autorest/azure/auth v0.5.1
@@ -86,6 +95,28 @@ github.com/Azure/go-autorest/logger
# github.com/Azure/go-autorest/tracing v0.6.0
## explicit; go 1.12
github.com/Azure/go-autorest/tracing
+# github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0
+## explicit; go 1.17
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version
+github.com/AzureAD/microsoft-authentication-library-for-go/apps/public
# github.com/BurntSushi/toml v0.3.1
## explicit
github.com/BurntSushi/toml
@@ -592,6 +623,9 @@ github.com/jinzhu/now
# github.com/jmespath/go-jmespath v0.4.0
## explicit; go 1.14
github.com/jmespath/go-jmespath
+# github.com/jongio/azidext/go/azidext v0.4.0
+## explicit; go 1.18
+github.com/jongio/azidext/go/azidext
# github.com/josharian/intern v1.0.0
## explicit; go 1.5
github.com/josharian/intern
@@ -607,6 +641,10 @@ github.com/kdomanski/iso9660
# github.com/kr/fs v0.1.0
## explicit
github.com/kr/fs
+# github.com/kylelemons/godebug v1.1.0
+## explicit; go 1.11
+github.com/kylelemons/godebug/diff
+github.com/kylelemons/godebug/pretty
# github.com/leodido/go-urn v1.2.1
## explicit; go 1.13
github.com/leodido/go-urn
@@ -776,6 +814,9 @@ github.com/pelletier/go-toml
## explicit
github.com/pierrec/lz4
github.com/pierrec/lz4/internal/xxh32
+# github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8
+## explicit; go 1.14
+github.com/pkg/browser
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
@@ -924,7 +965,7 @@ go.uber.org/zap/internal/bufferpool
go.uber.org/zap/internal/color
go.uber.org/zap/internal/exit
go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd => golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd
+# golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be => golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd
## explicit; go 1.17
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
@@ -949,7 +990,7 @@ golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/modfile
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.0.0-20220812174116-3211cb980234
+# golang.org/x/net v0.0.0-20221004154528-8021a29435af
## explicit; go 1.17
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
@@ -972,7 +1013,7 @@ golang.org/x/oauth2/jwt
# golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
## explicit
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab
+# golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec
## explicit; go 1.17
golang.org/x/sys/cpu
golang.org/x/sys/execabs